Changeset - cf4f87a2d85b
[Not reviewed]
MH - 4 years ago 2021-05-12 20:00:59
contact@maxhenger.nl
WIP on more evaluator bugs
9 files changed:
0 comments (0 inline, 0 general)
src/protocol/ast.rs
 
@@ -256,768 +256,770 @@ impl Pragma {
 
        match self {
 
            Pragma::Module(pragma) => pragma,
 
            _ => unreachable!("Tried to obtain {:?} as PragmaModule", self),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct PragmaVersion {
 
    pub this: PragmaId,
 
    // Phase 1: parser
 
    pub span: InputSpan, // of full pragma
 
    pub version: u64,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct PragmaModule {
 
    pub this: PragmaId,
 
    // Phase 1: parser
 
    pub span: InputSpan, // of full pragma
 
    pub value: Identifier,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum Import {
 
    Module(ImportModule),
 
    Symbols(ImportSymbols)
 
}
 

	
 
impl Import {
 
    pub(crate) fn span(&self) -> InputSpan {
 
        match self {
 
            Import::Module(v) => v.span,
 
            Import::Symbols(v) => v.span,
 
        }
 
    }
 

	
 
    pub(crate) fn as_module(&self) -> &ImportModule {
 
        match self {
 
            Import::Module(m) => m,
 
            _ => unreachable!("Unable to cast 'Import' to 'ImportModule'")
 
        }
 
    }
 
    pub(crate) fn as_symbols(&self) -> &ImportSymbols {
 
        match self {
 
            Import::Symbols(m) => m,
 
            _ => unreachable!("Unable to cast 'Import' to 'ImportSymbols'")
 
        }
 
    }
 
    pub(crate) fn as_symbols_mut(&mut self) -> &mut ImportSymbols {
 
        match self {
 
            Import::Symbols(m) => m,
 
            _ => unreachable!("Unable to cast 'Import' to 'ImportSymbols'")
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct ImportModule {
 
    pub this: ImportId,
 
    // Phase 1: parser
 
    pub span: InputSpan,
 
    pub module: Identifier,
 
    pub alias: Identifier,
 
    pub module_id: RootId,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct AliasedSymbol {
 
    pub name: Identifier,
 
    pub alias: Option<Identifier>,
 
    pub definition_id: DefinitionId,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct ImportSymbols {
 
    pub this: ImportId,
 
    // Phase 1: parser
 
    pub span: InputSpan,
 
    pub module: Identifier,
 
    pub module_id: RootId,
 
    pub symbols: Vec<AliasedSymbol>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct Identifier {
 
    pub span: InputSpan,
 
    pub value: StringRef<'static>,
 
}
 

	
 
impl PartialEq for Identifier {
 
    fn eq(&self, other: &Self) -> bool {
 
        return self.value == other.value
 
    }
 
}
 

	
 
impl Display for Identifier {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        write!(f, "{}", self.value.as_str())
 
    }
 
}
 

	
 
#[derive(Debug, Clone, PartialEq, Eq)]
 
pub enum ParserTypeVariant {
 
    // Special builtin, only usable by the compiler and not constructable by the
 
    // programmer
 
    Void,
 
    InputOrOutput,
 
    ArrayLike,
 
    IntegerLike,
 
    // Basic builtin
 
    Message,
 
    Bool,
 
    UInt8, UInt16, UInt32, UInt64,
 
    SInt8, SInt16, SInt32, SInt64,
 
    Character, String,
 
    // Literals (need to get concrete builtin type during typechecking)
 
    IntegerLiteral,
 
    // Marker for inference
 
    Inferred,
 
    // Builtins expecting one subsequent type
 
    Array,
 
    Input,
 
    Output,
 
    // User-defined types
 
    PolymorphicArgument(DefinitionId, usize), // usize = index into polymorphic variables
 
    Definition(DefinitionId, usize), // usize = number of following subtypes
 
}
 

	
 
impl ParserTypeVariant {
 
    pub(crate) fn num_embedded(&self) -> usize {
 
        use ParserTypeVariant::*;
 

	
 
        match self {
 
            Void | IntegerLike |
 
            Message | Bool |
 
            UInt8 | UInt16 | UInt32 | UInt64 |
 
            SInt8 | SInt16 | SInt32 | SInt64 |
 
            Character | String | IntegerLiteral |
 
            Inferred | PolymorphicArgument(_, _) =>
 
                0,
 
            ArrayLike | InputOrOutput | Array | Input | Output =>
 
                1,
 
            Definition(_, num) => *num,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct ParserTypeElement {
 
    // TODO: @cleanup, do we ever need the span of a user-defined type after
 
    //  constructing it?
 
    pub full_span: InputSpan, // full span of type, including any polymorphic arguments
 
    pub variant: ParserTypeVariant,
 
}
 

	
 
/// ParserType is a specification of a type during the parsing phase and initial
 
/// linker/validator phase of the compilation process. These types may be
 
/// (partially) inferred or represent literals (e.g. an integer whose byte size is
 
/// not yet determined).
 
#[derive(Debug, Clone)]
 
pub struct ParserType {
 
    pub elements: Vec<ParserTypeElement>
 
}
 

	
 
impl ParserType {
 
    pub(crate) fn iter_embedded(&self, parent_idx: usize) -> ParserTypeIter {
 
        ParserTypeIter::new(&self.elements, parent_idx)
 
    }
 
}
 

	
 
/// Iterator over the embedded elements of a specific element.
 
pub struct ParserTypeIter<'a> {
 
    pub elements: &'a [ParserTypeElement],
 
    pub cur_embedded_idx: usize,
 
}
 

	
 
impl<'a> ParserTypeIter<'a> {
 
    fn new(elements: &'a [ParserTypeElement], parent_idx: usize) -> Self {
 
        debug_assert!(parent_idx < elements.len(), "parent index exceeds number of elements in ParserType");
 
        if elements[parent_idx].variant.num_embedded() == 0 {
 
            // Parent element does not have any embedded types, place
 
            // `cur_embedded_idx` at end so we will always return `None`
 
            Self{ elements, cur_embedded_idx: elements.len() }
 
        } else {
 
            // Parent element has an embedded type
 
            Self{ elements, cur_embedded_idx: parent_idx + 1 }
 
        }
 
    }
 
}
 

	
 
impl<'a> Iterator for ParserTypeIter<'a> {
 
    type Item = &'a [ParserTypeElement];
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        let elements_len = self.elements.len();
 
        if self.cur_embedded_idx >= elements_len {
 
            return None;
 
        }
 

	
 
        // Seek to the end of the subtree
 
        let mut depth = 1;
 
        let start_element = self.cur_embedded_idx;
 
        while self.cur_embedded_idx < elements_len {
 
            let cur_element = &self.elements[self.cur_embedded_idx];
 
            let depth_change = cur_element.variant.num_embedded() as i32 - 1;
 
            depth += depth_change;
 
            debug_assert!(depth >= 0, "illegally constructed ParserType: {:?}", self.elements);
 

	
 
            self.cur_embedded_idx += 1;
 
            if depth == 0 {
 
                break;
 
            }
 
        }
 

	
 
        debug_assert!(depth == 0, "illegally constructed ParserType: {:?}", self.elements);
 
        return Some(&self.elements[start_element..self.cur_embedded_idx]);
 
    }
 
}
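// A minimal sketch (test-only, not used by the compiler) of the same depth-counting
// walk that `ParserTypeIter` performs: types are stored as a flattened pre-order
// list, every element knows how many embedded elements follow it, and a subtree
// ends once the running depth drops back to zero. Names here are illustrative.
#[cfg(test)]
mod parser_type_iter_sketch {
    /// Returns the (start, end) ranges of each embedded subtree of element 0,
    /// given only the number of embedded elements per flattened element.
    fn embedded_subtrees(num_embedded: &[usize]) -> Vec<(usize, usize)> {
        let mut ranges = Vec::new();
        let mut idx = 1; // embedded elements of the parent start right after it
        while idx < num_embedded.len() {
            let start = idx;
            let mut depth: i32 = 1;
            while idx < num_embedded.len() {
                depth += num_embedded[idx] as i32 - 1;
                idx += 1;
                if depth == 0 { break; }
            }
            ranges.push((start, idx));
        }
        ranges
    }

    #[test]
    fn flattened_pre_order_subtrees() {
        // `Array<Input<Bool>>` flattens to [Array(1), Input(1), Bool(0)]:
        // the single embedded subtree of `Array` spans elements 1..3.
        assert_eq!(embedded_subtrees(&[1, 1, 0]), vec![(1, 3)]);
        // A definition with two embedded types flattens to
        // [Definition(2), Bool(0), Bool(0)]: two single-element subtrees.
        assert_eq!(embedded_subtrees(&[2, 0, 0]), vec![(1, 2), (2, 3)]);
    }
}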
 

	
 
/// Specifies whether the symbolic type points to an actual user-defined type,
 
/// or whether it points to a polymorphic argument within the definition (e.g.
 
/// a defined variable `T var` within a function `int func<T>()`).
 
#[derive(Debug, Clone)]
 
pub enum SymbolicParserTypeVariant {
 
    Definition(DefinitionId),
 
    // TODO: figure out if I need the DefinitionId here
 
    PolyArg(DefinitionId, usize), // index of polyarg in the definition
 
}
 

	
 
/// ConcreteType is the representation of a type after resolving symbolic types
 
/// and performing type inference
 
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
 
pub enum ConcreteTypePart {
 
    // Markers for the use of polymorphic types within a procedure's body that
 
    // refer to polymorphic variables on the procedure's definition. Different
 
    // from markers in the `InferenceType`, these will not contain nested types.
 
    Marker(usize),
 
    // Special types (cannot be explicitly constructed by the programmer)
 
    Void,
 
    // Builtin types without nested types
 
    Message,
 
    Bool,
 
    UInt8, UInt16, UInt32, UInt64,
 
    SInt8, SInt16, SInt32, SInt64,
 
    Character, String,
 
    // Builtin types with one nested type
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // User defined type with any number of nested types
 
    Instance(DefinitionId, usize),
 
}
 

	
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub struct ConcreteType {
 
    pub(crate) parts: Vec<ConcreteTypePart>
 
}
 

	
 
impl Default for ConcreteType {
 
    fn default() -> Self {
 
        Self{ parts: Vec::new() }
 
    }
 
}
 

	
 
impl ConcreteType {
 
    pub(crate) fn has_marker(&self) -> bool {
 
        self.parts
 
            .iter()
 
            .any(|p| {
 
                if let ConcreteTypePart::Marker(_) = p { true } else { false }
 
            })
 
    }
 
}
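// A short sketch (test-only) of how a concrete type is laid out as a pre-order
// list of parts, and what `has_marker` reports for it.
#[cfg(test)]
mod concrete_type_sketch {
    use super::*;

    #[test]
    fn parts_are_stored_in_pre_order() {
        // `u8[]` is the `Array` part followed by its single nested part.
        let byte_array = ConcreteType{ parts: vec![ConcreteTypePart::Array, ConcreteTypePart::UInt8] };
        assert!(!byte_array.has_marker());

        // A use of a polymorphic variable inside a procedure body is a `Marker` part.
        let poly_use = ConcreteType{ parts: vec![ConcreteTypePart::Marker(0)] };
        assert!(poly_use.has_marker());
    }
}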
 

	
 
// TODO: Remove at some point
 
#[derive(Debug, Clone, PartialEq, Eq)]
 
pub enum PrimitiveType {
 
    Unassigned,
 
    Input,
 
    Output,
 
    Message,
 
    Boolean,
 
    Byte,
 
    Short,
 
    Int,
 
    Long,
 
}
 

	
 
#[derive(Debug, Clone, PartialEq, Eq)]
 
pub struct Type {
 
    pub primitive: PrimitiveType,
 
    pub array: bool,
 
}
 

	
 
#[allow(dead_code)]
 
impl Type {
 
    pub const UNASSIGNED: Type = Type { primitive: PrimitiveType::Unassigned, array: false };
 

	
 
    pub const INPUT: Type = Type { primitive: PrimitiveType::Input, array: false };
 
    pub const OUTPUT: Type = Type { primitive: PrimitiveType::Output, array: false };
 
    pub const MESSAGE: Type = Type { primitive: PrimitiveType::Message, array: false };
 
    pub const BOOLEAN: Type = Type { primitive: PrimitiveType::Boolean, array: false };
 
    pub const BYTE: Type = Type { primitive: PrimitiveType::Byte, array: false };
 
    pub const SHORT: Type = Type { primitive: PrimitiveType::Short, array: false };
 
    pub const INT: Type = Type { primitive: PrimitiveType::Int, array: false };
 
    pub const LONG: Type = Type { primitive: PrimitiveType::Long, array: false };
 

	
 
    pub const INPUT_ARRAY: Type = Type { primitive: PrimitiveType::Input, array: true };
 
    pub const OUTPUT_ARRAY: Type = Type { primitive: PrimitiveType::Output, array: true };
 
    pub const MESSAGE_ARRAY: Type = Type { primitive: PrimitiveType::Message, array: true };
 
    pub const BOOLEAN_ARRAY: Type = Type { primitive: PrimitiveType::Boolean, array: true };
 
    pub const BYTE_ARRAY: Type = Type { primitive: PrimitiveType::Byte, array: true };
 
    pub const SHORT_ARRAY: Type = Type { primitive: PrimitiveType::Short, array: true };
 
    pub const INT_ARRAY: Type = Type { primitive: PrimitiveType::Int, array: true };
 
    pub const LONG_ARRAY: Type = Type { primitive: PrimitiveType::Long, array: true };
 
}
 

	
 
impl Display for Type {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
 
        match &self.primitive {
 
            PrimitiveType::Unassigned => {
 
                write!(f, "unassigned")?;
 
            }
 
            PrimitiveType::Input => {
 
                write!(f, "in")?;
 
            }
 
            PrimitiveType::Output => {
 
                write!(f, "out")?;
 
            }
 
            PrimitiveType::Message => {
 
                write!(f, "msg")?;
 
            }
 
            PrimitiveType::Boolean => {
 
                write!(f, "boolean")?;
 
            }
 
            PrimitiveType::Byte => {
 
                write!(f, "byte")?;
 
            }
 
            PrimitiveType::Short => {
 
                write!(f, "short")?;
 
            }
 
            PrimitiveType::Int => {
 
                write!(f, "int")?;
 
            }
 
            PrimitiveType::Long => {
 
                write!(f, "long")?;
 
            }
 
        }
 
        if self.array {
 
            write!(f, "[]")
 
        } else {
 
            Ok(())
 
        }
 
    }
 
}
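// A small usage sketch (test-only): the `Display` impl prints the primitive
// keyword and appends "[]" for array types.
#[cfg(test)]
mod type_display_sketch {
    use super::*;

    #[test]
    fn formats_primitives_and_arrays() {
        assert_eq!(Type::INT.to_string(), "int");
        assert_eq!(Type::BYTE_ARRAY.to_string(), "byte[]");
    }
}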
 

	
 
#[derive(Debug, Clone)]
 
pub enum Field {
 
    Length,
 
    Symbolic(FieldSymbolic),
 
}
 
impl Field {
 
    pub fn is_length(&self) -> bool {
 
        match self {
 
            Field::Length => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    pub fn as_symbolic(&self) -> &FieldSymbolic {
 
        match self {
 
            Field::Symbolic(v) => v,
 
            _ => unreachable!("attempted to get Field::Symbolic from {:?}", self)
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct FieldSymbolic {
 
    // Phase 1: Parser
 
    pub(crate) identifier: Identifier,
 
    // Phase 3: Typing
 
    // TODO: @Monomorph These fields cannot be trusted because the last time it
 
    //  was typed it may refer to a different monomorph.
 
    pub(crate) definition: Option<DefinitionId>,
 
    pub(crate) field_idx: usize,
 
}
 

	
 
#[derive(Debug, Clone, Copy)]
 
pub enum Scope {
 
    Definition(DefinitionId),
 
    Regular(BlockStatementId),
 
    Synchronous((SynchronousStatementId, BlockStatementId)),
 
}
 

	
 
impl Scope {
 
    pub fn is_block(&self) -> bool {
 
        match &self {
 
            Scope::Definition(_) => false,
 
            Scope::Regular(_) => true,
 
            Scope::Synchronous(_) => true,
 
        }
 
    }
 
    pub fn to_block(&self) -> BlockStatementId {
 
        match &self {
 
            Scope::Regular(id) => *id,
 
            Scope::Synchronous((_, id)) => *id,
 
            _ => panic!("unable to get BlockStatement from Scope")
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum VariableKind {
 
    Parameter,
 
    Local,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct Variable {
 
    pub this: VariableId,
 
    // Parsing
 
    pub kind: VariableKind,
 
    pub parser_type: ParserType,
 
    pub identifier: Identifier,
 
    // Validator/linker
 
    pub relative_pos_in_block: u32,
 
    pub unique_id_in_scope: i32, // Temporary fix until proper bytecode/asm is generated
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum Definition {
 
    Struct(StructDefinition),
 
    Enum(EnumDefinition),
 
    Union(UnionDefinition),
 
    Component(ComponentDefinition),
 
    Function(FunctionDefinition),
 
}
 

	
 
impl Definition {
 
    pub fn is_struct(&self) -> bool {
 
        match self {
 
            Definition::Struct(_) => true,
 
            _ => false
 
        }
 
    }
 
    pub(crate) fn as_struct(&self) -> &StructDefinition {
 
        match self {
 
            Definition::Struct(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'StructDefinition'"),
 
        }
 
    }
 
    pub(crate) fn as_struct_mut(&mut self) -> &mut StructDefinition {
 
        match self {
 
            Definition::Struct(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'StructDefinition'"),
 
        }
 
    }
 
    pub fn is_enum(&self) -> bool {
 
        match self {
 
            Definition::Enum(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub(crate) fn as_enum(&self) -> &EnumDefinition {
 
        match self {
 
            Definition::Enum(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'EnumDefinition'"),
 
        }
 
    }
 
    pub(crate) fn as_enum_mut(&mut self) -> &mut EnumDefinition {
 
        match self {
 
            Definition::Enum(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'EnumDefinition'"),
 
        }
 
    }
 
    pub fn is_union(&self) -> bool {
 
        match self {
 
            Definition::Union(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub(crate) fn as_union(&self) -> &UnionDefinition {
 
        match self {
 
            Definition::Union(result) => result, 
 
            _ => panic!("Unable to cast 'Definition' to 'UnionDefinition'"),
 
        }
 
    }
 
    pub(crate) fn as_union_mut(&mut self) -> &mut UnionDefinition {
 
        match self {
 
            Definition::Union(result) => result,
 
            _ => panic!("Unable to cast 'Definition' to 'UnionDefinition'"),
 
        }
 
    }
 
    pub fn is_component(&self) -> bool {
 
        match self {
 
            Definition::Component(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub(crate) fn as_component(&self) -> &ComponentDefinition {
 
        match self {
 
            Definition::Component(result) => result,
 
            _ => panic!("Unable to cast `Definition` to `Component`"),
 
        }
 
    }
 
    pub(crate) fn as_component_mut(&mut self) -> &mut ComponentDefinition {
 
        match self {
 
            Definition::Component(result) => result,
 
            _ => panic!("Unable to cast `Definition` to `Component`"),
 
        }
 
    }
 
    pub fn is_function(&self) -> bool {
 
        match self {
 
            Definition::Function(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub(crate) fn as_function(&self) -> &FunctionDefinition {
 
        match self {
 
            Definition::Function(result) => result,
 
            _ => panic!("Unable to cast `Definition` to `Function`"),
 
        }
 
    }
 
    pub(crate) fn as_function_mut(&mut self) -> &mut FunctionDefinition {
 
        match self {
 
            Definition::Function(result) => result,
 
            _ => panic!("Unable to cast `Definition` to `Function`"),
 
        }
 
    }
 
    pub fn parameters(&self) -> &Vec<VariableId> {
 
        match self {
 
            Definition::Component(def) => &def.parameters,
 
            Definition::Function(def) => &def.parameters,
 
            _ => panic!("Called parameters() on {:?}", self)
 
        }
 
    }
 
    pub fn defined_in(&self) -> RootId {
 
        match self {
 
            Definition::Struct(def) => def.defined_in,
 
            Definition::Enum(def) => def.defined_in,
 
            Definition::Union(def) => def.defined_in,
 
            Definition::Component(def) => def.defined_in,
 
            Definition::Function(def) => def.defined_in,
 
        }
 
    }
 
    pub fn identifier(&self) -> &Identifier {
 
        match self {
 
            Definition::Struct(def) => &def.identifier,
 
            Definition::Enum(def) => &def.identifier,
 
            Definition::Union(def) => &def.identifier,
 
            Definition::Component(def) => &def.identifier,
 
            Definition::Function(def) => &def.identifier,
 
        }
 
    }
 
    pub fn poly_vars(&self) -> &Vec<Identifier> {
 
        match self {
 
            Definition::Struct(def) => &def.poly_vars,
 
            Definition::Enum(def) => &def.poly_vars,
 
            Definition::Union(def) => &def.poly_vars,
 
            Definition::Component(def) => &def.poly_vars,
 
            Definition::Function(def) => &def.poly_vars,
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct StructFieldDefinition {
 
    pub span: InputSpan,
 
    pub field: Identifier,
 
    pub parser_type: ParserType,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct StructDefinition {
 
    pub this: StructDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub fields: Vec<StructFieldDefinition>
 
}
 

	
 
impl StructDefinition {
 
    pub(crate) fn new_empty(
 
        this: StructDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ this, defined_in, span, identifier, poly_vars, fields: Vec::new() }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum EnumVariantValue {
 
    None,
 
    Integer(i64),
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct EnumVariantDefinition {
 
    pub identifier: Identifier,
 
    pub value: EnumVariantValue,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct EnumDefinition {
 
    pub this: EnumDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub variants: Vec<EnumVariantDefinition>,
 
}
 

	
 
impl EnumDefinition {
 
    pub(crate) fn new_empty(
 
        this: EnumDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ this, defined_in, span, identifier, poly_vars, variants: Vec::new() }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum UnionVariantValue {
 
    None,
 
    Embedded(Vec<ParserType>),
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct UnionVariantDefinition {
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub value: UnionVariantValue,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct UnionDefinition {
 
    pub this: UnionDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub variants: Vec<UnionVariantDefinition>,
 
}
 

	
 
impl UnionDefinition {
 
    pub(crate) fn new_empty(
 
        this: UnionDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ this, defined_in, span, identifier, poly_vars, variants: Vec::new() }
 
    }
 
}
 

	
 
#[derive(Debug, Clone, Copy)]
 
pub enum ComponentVariant {
 
    Primitive,
 
    Composite,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub struct ComponentDefinition {
 
    pub this: ComponentDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub span: InputSpan,
 
    pub variant: ComponentVariant,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub parameters: Vec<VariableId>,
 
    pub body: BlockStatementId,
 
}
 

	
 
impl ComponentDefinition {
 
    pub(crate) fn new_empty(
 
        this: ComponentDefinitionId, defined_in: RootId, span: InputSpan,
 
        variant: ComponentVariant, identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self{ 
 
            this, defined_in, span, variant, identifier, poly_vars,
 
            parameters: Vec::new(), 
 
            body: BlockStatementId::new_invalid()
 
        }
 
    }
 
}
 

	
 
// Note that we will have function definitions for builtin functions as well. In
 
// that case the span, the identifier span and the body are all invalid.
 
#[derive(Debug, Clone)]
 
pub struct FunctionDefinition {
 
    pub this: FunctionDefinitionId,
 
    pub defined_in: RootId,
 
    // Phase 1: symbol scanning
 
    pub builtin: bool,
 
    pub span: InputSpan,
 
    pub identifier: Identifier,
 
    pub poly_vars: Vec<Identifier>,
 
    // Phase 2: parsing
 
    pub return_types: Vec<ParserType>,
 
    pub parameters: Vec<VariableId>,
 
    pub body: BlockStatementId,
 
}
 

	
 
impl FunctionDefinition {
 
    pub(crate) fn new_empty(
 
        this: FunctionDefinitionId, defined_in: RootId, span: InputSpan,
 
        identifier: Identifier, poly_vars: Vec<Identifier>
 
    ) -> Self {
 
        Self {
 
            this, defined_in,
 
            builtin: false,
 
            span, identifier, poly_vars,
 
            return_types: Vec::new(),
 
            parameters: Vec::new(),
 
            body: BlockStatementId::new_invalid(),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub enum Statement {
 
    Block(BlockStatement),
 
    EndBlock(EndBlockStatement),
 
    Local(LocalStatement),
 
    Labeled(LabeledStatement),
 
    If(IfStatement),
 
    EndIf(EndIfStatement),
 
    While(WhileStatement),
 
    EndWhile(EndWhileStatement),
 
    Break(BreakStatement),
 
    Continue(ContinueStatement),
 
    Synchronous(SynchronousStatement),
 
    EndSynchronous(EndSynchronousStatement),
 
    Return(ReturnStatement),
 
    Goto(GotoStatement),
 
    New(NewStatement),
 
    Expression(ExpressionStatement),
 
}
 

	
 
impl Statement {
 
    pub fn is_block(&self) -> bool {
 
        match self {
 
            Statement::Block(_) => true,
 
            _ => false,
 
        }
 
    }
 
    pub fn as_block(&self) -> &BlockStatement {
 
        match self {
 
            Statement::Block(result) => result,
 
            _ => panic!("Unable to cast `Statement` to `BlockStatement`"),
 
        }
 
    }
 
    pub fn as_block_mut(&mut self) -> &mut BlockStatement {
 
        match self {
 
            Statement::Block(result) => result,
 
            _ => panic!("Unable to cast `Statement` to `BlockStatement`"),
 
        }
 
    }
 
    pub fn as_local(&self) -> &LocalStatement {
 
        match self {
src/protocol/eval/executor.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::*;
 
use super::store::*;
 
use crate::protocol::*;
 
use crate::protocol::ast::*;
 

	
 
macro_rules! debug_enabled { () => { true }; }
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "exec", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "exec", $format, $($args),*);
 
    };
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) enum ExprInstruction {
 
    EvalExpr(ExpressionId),
 
    PushValToFront,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Frame {
 
    definition: DefinitionId,
 
    position: StatementId,
 
    expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
}
 

	
 
impl Frame {
 
    /// Creates a new execution frame. Does not modify the stack in any way.
 
    pub fn new(heap: &Heap, definition_id: DefinitionId) -> Self {
 
        let definition = &heap[definition_id];
 
        let first_statement = match definition {
 
            Definition::Component(definition) => definition.body,
 
            Definition::Function(definition) => definition.body,
 
            _ => unreachable!("initializing frame with {:?} instead of a function/component", definition),
 
        };
 

	
 
        Frame{
 
            definition: definition_id,
 
            position: first_statement.upcast(),
 
            expr_stack: VecDeque::with_capacity(128),
 
            expr_values: VecDeque::with_capacity(128),
 
        }
 
    }
 

	
 
    /// Prepares a single expression for execution. This involves walking the
 
    /// expression tree and putting its nodes in the `expr_stack` such that
 
    /// continuously popping from its back will evaluate the expression. The
 
    /// results of each expression will be stored by pushing onto `expr_values`.
 
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear(); // May not be empty if last expression result(s) were discarded
 

	
 
        self.serialize_expression(heap, expr_id);
 
    }
 

	
 
    /// Prepares multiple expressions for execution (i.e. evaluating all
 
    /// function arguments or all elements of an array/union literal). Per
 
    /// expression this works the same as `prepare_single_expression`. However,
    /// for each expression we also insert a `PushValToFront` instruction, which
    /// runs after that expression is evaluated and moves its result to the
    /// front of `expr_values`, keeping the results in definition order.
 
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear();
 

	
 
        for expr_id in expr_ids {
 
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
            self.serialize_expression(heap, *expr_id);
 
        }
 
    }
 

	
 
    /// Performs a depth-first serialization of the expression tree. Let's not
    /// care about performance for a temporary runtime implementation.
 
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
 
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));
 

	
 
        match &heap[id] {
 
            Expression::Assignment(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Binding(expr) => {
 
                todo!("implement binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                self.serialize_expression(heap, expr.test);
 
            },
 
            Expression::Binary(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Unary(expr) => {
 
                self.serialize_expression(heap, expr.expression);
 
            },
 
            Expression::Indexing(expr) => {
 
                self.serialize_expression(heap, expr.index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Slicing(expr) => {
 
                self.serialize_expression(heap, expr.from_index);
 
                self.serialize_expression(heap, expr.to_index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Select(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Literal(expr) => {
 
                // Here we only care about literals that have subexpressions
 
                match &expr.value {
 
                    Literal::Null | Literal::True | Literal::False |
 
                    Literal::Character(_) | Literal::String(_) |
 
                    Literal::Integer(_) | Literal::Enum(_) => {
 
                        // No subexpressions
 
                    },
 
                    Literal::Struct(literal) => {
 
                        for field in &literal.fields {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, field.value);
 
                        }
 
                    },
 
                    Literal::Union(literal) => {
 
                        for value_expr_id in &literal.values {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Array(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    }
 
                }
 
            },
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
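// Two small sketches (test-only, using a toy expression type instead of the real
// AST) of the mechanics used by `Frame`:
//  1. pushing `EvalExpr(parent)` before recursing into its children, and then
//     consuming the instruction stack from the back, evaluates children before
//     their parent;
//  2. `PushValToFront` (a `rotate_right(1)` on the value queue) leaves argument
//     results at the front in definition order, even though the arguments are
//     evaluated last-to-first.
#[cfg(test)]
mod frame_mechanics_sketch {
    use std::collections::VecDeque;

    enum ToyExpr { Lit(i64), Sub(Box<ToyExpr>, Box<ToyExpr>) }

    fn serialize<'a>(stack: &mut VecDeque<&'a ToyExpr>, expr: &'a ToyExpr) {
        stack.push_back(expr);
        if let ToyExpr::Sub(left, right) = expr {
            serialize(stack, left);
            serialize(stack, right);
        }
    }

    #[test]
    fn children_are_evaluated_before_parents() {
        let expr = ToyExpr::Sub(Box::new(ToyExpr::Lit(5)), Box::new(ToyExpr::Lit(2)));
        let mut stack = VecDeque::new();
        serialize(&mut stack, &expr);

        let mut values: VecDeque<i64> = VecDeque::new();
        while let Some(expr) = stack.pop_back() {
            match expr {
                ToyExpr::Lit(value) => values.push_back(*value),
                ToyExpr::Sub(..) => {
                    // Mirrors the real evaluator: the left operand is popped first.
                    let lhs = values.pop_back().unwrap();
                    let rhs = values.pop_back().unwrap();
                    values.push_back(lhs - rhs);
                }
            }
        }
        assert_eq!(values.pop_back(), Some(3));
    }

    #[test]
    fn push_val_to_front_preserves_definition_order() {
        let mut values: VecDeque<i32> = VecDeque::new();
        // The instruction stack is consumed back-to-front, so the last argument
        // is evaluated first; each result is immediately rotated to the front.
        for argument_result in [30, 20, 10] {
            values.push_back(argument_result);
            values.rotate_right(1); // PushValToFront
        }
        assert_eq!(values.pop_front(), Some(10));
        assert_eq!(values.pop_front(), Some(20));
        assert_eq!(values.pop_front(), Some(30));
    }
}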
 

	
 
type EvalResult = Result<EvalContinuation, ()>;
 
pub enum EvalContinuation {
 
    Stepping,
 
    Inconsistent,
 
    Terminal,
 
    SyncBlockStart,
 
    SyncBlockEnd,
 
    NewComponent(DefinitionId, ValueGroup),
 
    BlockFires(Value),
 
    BlockGet(Value),
 
    Put(Value, Value),
 
}
 

	
 
// Note: cloning is fine, methinks. If we clone all values and the heap regions
// then we end up with valid "pointers" to heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
    pub fn new(heap: &Heap, def: DefinitionId, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        prompt.frames.push(Frame::new(heap, def));
 
        args.into_store(&mut prompt.store);
 

	
 
        prompt
 
    }
 

	
 
    pub(crate) fn step(&mut self, heap: &Heap, ctx: &mut EvalContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the transformation first (because Rust...)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
 

	
 
        // Checking if we're at the end of execution
 
        let cur_frame = self.frames.last_mut().unwrap();
 
        if cur_frame.position.is_invalid() {
 
            if heap[cur_frame.definition].is_function() {
 
                todo!("End of function without return, return an evaluation error");
 
            }
 
            return Ok(EvalContinuation::Terminal);
 
        }
 

	
 
        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier().value.as_str());
 

	
 
        // Execute all pending expressions
 
        while !cur_frame.expr_stack.is_empty() {
 
            let next = cur_frame.expr_stack.pop_back().unwrap();
 
            debug_log!("Expr stack: {:?}", next);
 
            match next {
 
                ExprInstruction::PushValToFront => {
 
                    cur_frame.expr_values.rotate_right(1);
 
                },
 
                ExprInstruction::EvalExpr(expr_id) => {
 
                    let expr = &heap[expr_id];
 
                    match expr {
 
                        Expression::Assignment(expr) => {
 
                            // TODO: Stuff goes wrong here, either make these
 
                            //  utilities do the alloc/dealloc, or let it all be
 
                            //  done here.
 
                            let to = cur_frame.expr_values.pop_back().unwrap().as_ref();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 
                            // let rhs_heap_pos = rhs.get_heap_pos();
 

	
 
                            // Note: although not pretty, the assignment operator takes ownership
 
                            // of the right-hand side value when possible. So we do not drop the
 
                            // rhs's optionally owned heap data.
 
                            let rhs = self.store.read_take_ownership(rhs);
 
                            apply_assignment_operator(&mut self.store, to, expr.operation, rhs);
 
                            cur_frame.expr_values.push_back(self.store.read_copy(to));
 
                            // self.store.drop_value(rhs_heap_pos);
 
                        },
 
                        Expression::Binding(_expr) => {
 
                            todo!("Binding expression");
 
                        },
 
                        Expression::Conditional(expr) => {
 
                            // Evaluate testing expression, then extend the
 
                            // expression stack with the appropriate expression
 
                            let test_result = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                            if test_result {
 
                                cur_frame.serialize_expression(heap, expr.true_expression);
 
                            } else {
 
                                cur_frame.serialize_expression(heap, expr.false_expression);
 
                            }
 
                        },
 
                        Expression::Binary(expr) => {
 
                            let lhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_binary_operator(&mut self.store, &lhs, expr.operation, &rhs);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(lhs.get_heap_pos());
 
                            self.store.drop_value(rhs.get_heap_pos());
 
                        },
 
                        Expression::Unary(expr) => {
 
                            let val = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_unary_operator(&mut self.store, expr.operation, &val);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(val.get_heap_pos());
 
                        },
 
                        Expression::Indexing(expr) => {
 
                            // TODO: Out of bounds checking
 
                            // Evaluate index. Never heap allocated so we do
 
                            // not have to drop it.
 
                            let index = cur_frame.expr_values.pop_back().unwrap();
 
                            let index = self.store.maybe_read_ref(&index);
 

	
 
                            debug_assert!(index.is_integer());
 
                            let index = if index.is_signed_integer() {
 
                                index.as_signed_integer() as u32
 
                            } else {
 
                                index.as_unsigned_integer() as u32
 
                            };
 

	
 
                            // TODO: This is probably wrong, we're dropping the
 
                            //  heap while referring to an element...
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
                            let (deallocate_heap_pos, value_to_push) = match subject {
                                Value::Ref(value_ref) => {
                                    // Our expression stack value is a reference to something that
                                    // exists in the normal stack/heap. We don't want to deallocate
                                    // this thing. Rather we want to return a reference to it.
                                    let subject = self.store.read_ref(value_ref);
                                    let subject_heap_pos = subject.as_array();

                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, index)))
                                },
                                _ => {
                                    // Our value lives on the expression stack, hence we need to
                                    // clone whatever we're referring to. Then drop the subject.
                                    let subject_heap_pos = subject.as_array();
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, index));
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
                                },
                            };

                            cur_frame.expr_values.push_back(value_to_push);
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Slicing(expr) => {
 
                            // TODO: Out of bounds checking
 
                            todo!("implement slicing")
 
                        },
 
                        Expression::Select(expr) => {
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
                            let field_idx = expr.field.as_symbolic().field_idx as u32;

                            // Note: same as above: clone if the value lives on the expr stack,
                            // simply refer to it if it already lives on the stack/heap.
                            let (deallocate_heap_pos, value_to_push) = match subject {
                                Value::Ref(value_ref) => {
                                    let subject = self.store.read_ref(value_ref);
                                    let subject_heap_pos = subject.as_struct();

                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, field_idx)))
                                },
                                _ => {
                                    let subject_heap_pos = subject.as_struct();
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, field_idx));
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
                                },
                            };

                            cur_frame.expr_values.push_back(value_to_push);
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Literal(expr) => {
 
                            let value = match &expr.value {
 
                                Literal::Null => Value::Null,
 
                                Literal::True => Value::Bool(true),
 
                                Literal::False => Value::Bool(false),
 
                                Literal::Character(lit_value) => Value::Char(*lit_value),
 
                                Literal::String(lit_value) => {
 
                                    let heap_pos = self.store.alloc_heap();
 
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
 
                                    let value = lit_value.as_str();
 
                                    debug_assert!(values.is_empty());
 
                                    values.reserve(value.len());
 
                                    for character in value.as_bytes() {
 
                                        debug_assert!(character.is_ascii());
 
                                        values.push(Value::Char(*character as char));
 
                                    }
 
                                    Value::String(heap_pos)
 
                                }
 
                                Literal::Integer(lit_value) => {
 
                                    use ConcreteTypePart as CTP;
 
                                    debug_assert_eq!(expr.concrete_type.parts.len(), 1);
 
                                    match expr.concrete_type.parts[0] {
 
                                        CTP::UInt8  => Value::UInt8(lit_value.unsigned_value as u8),
 
                                        CTP::UInt16 => Value::UInt16(lit_value.unsigned_value as u16),
 
                                        CTP::UInt32 => Value::UInt32(lit_value.unsigned_value as u32),
 
                                        CTP::UInt64 => Value::UInt64(lit_value.unsigned_value as u64),
 
                                        CTP::SInt8  => Value::SInt8(lit_value.unsigned_value as i8),
 
                                        CTP::SInt16 => Value::SInt16(lit_value.unsigned_value as i16),
 
                                        CTP::SInt32 => Value::SInt32(lit_value.unsigned_value as i32),
 
                                        CTP::SInt64 => Value::SInt64(lit_value.unsigned_value as i64),
 
                                        _ => unreachable!(),
 
                                    }
 
                                }
 
                                Literal::Struct(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.fields.len()
 
                                    );
 
                                    Value::Struct(heap_pos)
 
                                }
 
                                Literal::Enum(lit_value) => {
 
                                    Value::Enum(lit_value.variant_idx as i64)
 
                                }
 
                                Literal::Union(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.values.len()
 
                                    );
 
                                    Value::Union(lit_value.variant_idx as i64, heap_pos)
 
                                }
 
                                Literal::Array(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.len()
 
                                    );
 
                                    Value::Array(heap_pos)
 
                                }
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value);
 
                        },
 
                        Expression::Call(expr) => {
 
                            // Push a new frame. Note that all expressions have
 
                            // been pushed to the front, so they're in the order
 
                            // of the definition.
 
                            let num_args = expr.arguments.len();
 

	
 
                            // Determine stack boundaries
 
                            let cur_stack_boundary = self.store.cur_stack_boundary;
 
                            let new_stack_boundary = self.store.stack.len();
 

	
 
                            // Push new boundary and function arguments for new frame
 
                            self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
 
                            for _ in 0..num_args {
 
                                let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
 
                                self.store.stack.push(argument);
 
                            }
 

	
 
                            // Push the new frame
 
                            self.frames.push(Frame::new(heap, expr.definition));
 
                            self.store.cur_stack_boundary = new_stack_boundary;
 

	
 
                            // To simplify the logic a little bit we will now
 
                            // return and ask our caller to call us again
 
                            return Ok(EvalContinuation::Stepping);
 
                        },
 
                        Expression::Variable(expr) => {
 
                            let variable = &heap[expr.declaration.unwrap()];
 
                            cur_frame.expr_values.push_back(Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos)));
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_log!("Frame [{:?}] at {:?}, stack size = {}", cur_frame.definition, cur_frame.position, self.store.stack.len());
 
        if debug_enabled!() {
 
            debug_log!("Stack:");
 
            for (stack_idx, stack_val) in self.store.stack.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", stack_idx, stack_val);
 
            }
 

	
 
            debug_log!("Heap:");
 
            for (heap_idx, heap_region) in self.store.heap_regions.iter().enumerate() {
 
                let is_free = self.store.free_regions.iter().any(|idx| *idx as usize == heap_idx);
 
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", heap_idx, !is_free, heap_region.values.len(), &heap_region.values);
 
            }
 
        }
 
        // No (more) expressions to evaluate. So evaluate statement (that may
 
        // depend on the result on the last evaluated expression(s))
 
        let stmt = &heap[cur_frame.position];
 
        let return_value = match stmt {
 
            Statement::Block(stmt) => {
 
                // Reserve space on stack, but also make sure excess stack space
 
                // is cleared
 
                self.store.clear_stack(stmt.first_unique_id_in_scope as usize);
 
                self.store.reserve_stack(stmt.next_unique_id_in_scope as usize);
 
                cur_frame.position = stmt.statements[0];
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndBlock(stmt) => {
 
                let block = &heap[stmt.start_block];
 
                self.store.clear_stack(block.first_unique_id_in_scope as usize);
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Memory(stmt) => {
 
                        let variable = &heap[stmt.variable];
 
                        self.store.write(ValueId::Stack(variable.unique_id_in_scope as u32), Value::Unassigned);
 

	
 
                        cur_frame.position = stmt.next;
 
                    },
 
                    LocalStatement::Channel(stmt) => {
 
                        let [from_value, to_value] = ctx.new_channel();
 
                        self.store.write(ValueId::Stack(heap[stmt.from].unique_id_in_scope as u32), from_value);
 
                        self.store.write(ValueId::Stack(heap[stmt.to].unique_id_in_scope as u32), to_value);
 

	
 
                        cur_frame.position = stmt.next;
 
                    }
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Labeled(stmt) => {
 
                cur_frame.position = stmt.body;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::If(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for if statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.true_body.upcast();
 
                } else if let Some(false_body) = stmt.false_body {
 
                    cur_frame.position = false_body.upcast();
 
                } else {
 
                    // Not true, and no false body
 
                    cur_frame.position = stmt.end_if.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndIf(stmt) => {
 
                cur_frame.position = stmt.next;
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::While(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for while statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.body.upcast();
 
                } else {
 
                    cur_frame.position = stmt.end_while.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndWhile(stmt) => {
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Break(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Continue(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Synchronous(stmt) => {
 
                cur_frame.position = stmt.body.upcast();
 

	
 
                Ok(EvalContinuation::SyncBlockStart)
 
            },
 
            Statement::EndSynchronous(stmt) => {
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::SyncBlockEnd)
 
            },
 
            Statement::Return(stmt) => {
 
                debug_assert!(heap[cur_frame.definition].is_function());
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for return statement");
 

	
 
                // The preceding frame has executed a call, so is expecting the
 
                // return expression on its expression value stack. Note that
 
                // we may be returning a reference to something on our stack,
 
                // so we need to read that value and clone it.
 
                let return_value = cur_frame.expr_values.pop_back().unwrap();
 
                println!("DEBUG: Pre-ret val {:?}", &return_value);
 
                let return_value = match return_value {
 
                    Value::Ref(value_id) => self.store.read_copy(value_id),
 
                    _ => return_value,
 
                };
 
                println!("DEBUG: Pos-ret val {:?}", &return_value);
 

	
 
                // Pre-emptively pop our stack frame
 
                self.frames.pop();
 

	
 
                // Clean up our section of the stack
 
                self.store.clear_stack(0);
 
                let prev_stack_idx = self.store.stack.pop().unwrap().as_stack_boundary();
 

	
 
                // TODO: Temporary hack for testing, remove at some point
 
                if self.frames.is_empty() {
 
                    debug_assert!(prev_stack_idx == -1);
 
                    debug_assert!(self.store.stack.len() == 0);
 
                    self.store.stack.push(return_value);
 
                    return Ok(EvalContinuation::Terminal);
 
                }
 

	
 
                debug_assert!(prev_stack_idx >= 0);
 
                // Return to original state of stack frame
 
                self.store.cur_stack_boundary = prev_stack_idx as usize;
 
                let cur_frame = self.frames.last_mut().unwrap();
 
                cur_frame.expr_values.push_back(return_value);
 

	
 
                // Immediately return: we just returned to the previous frame,
                // which might be in the middle of evaluating expressions for a
                // particular statement, so we don't want to enter the code
                // below, and we don't care about the current frame anymore.
 
                return Ok(EvalContinuation::Stepping);
 
            },
 
            Statement::Goto(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::New(stmt) => {
 
                let call_expr = &heap[stmt.expression];
 
                debug_assert!(heap[call_expr.definition].is_component());
 
                debug_assert_eq!(
 
                    cur_frame.expr_values.len(), heap[call_expr.definition].parameters().len(),
 
                    "mismatch in expr stack size and number of arguments for new statement"
 
                );
 

	
 
                // Note that due to expression value evaluation they exist in
 
                // reverse order on the stack.
 
                // TODO: Revise this code, keep it as is to be compatible with current runtime
 
                let mut args = Vec::new();
 
                while let Some(value) = cur_frame.expr_values.pop_front() {
 
                    args.push(value);
 
                }
 

	
 
                // Construct argument group, thereby copying heap regions
 
                let argument_group = ValueGroup::from_store(&self.store, &args);
 

	
 
                // Clear any heap regions
 
                for arg in &args {
 
                    self.store.drop_value(arg.get_heap_pos());
 
                }
 

	
 
                cur_frame.position = stmt.next;
 

	
 
                todo!("Make sure this is handled correctly, transfer 'heap' values to another Prompt");
 
                Ok(EvalContinuation::NewComponent(call_expr.definition, argument_group))
 
            },
 
            Statement::Expression(stmt) => {
 
                // The expression has just been completely evaluated. Some
 
                // values might have remained on the expression value stack.
 
                cur_frame.expr_values.clear();
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
        };
 

	
 
        assert!(
 
            cur_frame.expr_values.is_empty(),
 
            "This is a debugging assertion that will fail if you perform expressions without \
 
            assigning to anything. This should be completely valid, and this assertion should be \
 
            replaced by something that clears the expression values if needed, but I'll keep this \
 
            in for now for debugging purposes."
 
        );
 

	
 
        // If the next statement requires evaluating expressions then we push
 
        // these onto the expression stack. This way we will evaluate this
 
        // stack in the next loop, then evaluate the statement using the result
 
        // from the expression evaluation.
 
        if !cur_frame.position.is_invalid() {
 
            let stmt = &heap[cur_frame.position];
 

	
 
            match stmt {
 
                Statement::If(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::While(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::Return(stmt) => {
 
                    debug_assert_eq!(stmt.expressions.len(), 1); // TODO: @ReturnValues
 
                    cur_frame.prepare_single_expression(heap, stmt.expressions[0]);
 
                },
 
                Statement::New(stmt) => {
 
                    // Note that we will end up not evaluating the call itself.
 
                    // Rather we will evaluate its expressions and then
 
                    // instantiate the component upon reaching the "new" stmt.
 
                    let call_expr = &heap[stmt.expression];
 
                    cur_frame.prepare_multiple_expressions(heap, &call_expr.arguments);
 
                },
 
                Statement::Expression(stmt) => {
 
                    cur_frame.prepare_single_expression(heap, stmt.expression);
 
                }
 
                _ => {},
 
            }
 
        }
 

	
 
        return_value
 
    }
 
}
 
\ No newline at end of file
src/protocol/eval/store.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::{Value, ValueId, HeapPos};
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct HeapAllocation {
 
    pub values: Vec<Value>,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Store {
 
    // The stack where variables/parameters are stored. Note that this is a
 
    // non-shrinking stack. So it may be filled with garbage.
 
    pub(crate) stack: Vec<Value>,
 
    // Represents the place in the stack where we find the `PrevStackBoundary`
 
    // value containing the previous stack boundary. This is so we can pop from
 
    // the stack after function calls.
 
    pub(crate) cur_stack_boundary: usize,
 
    // A rather ridiculous simulated heap, but this allows us to "allocate"
 
    // things that occupy more than one stack slot.
 
    pub(crate) heap_regions: Vec<HeapAllocation>,
 
    pub(crate) free_regions: VecDeque<HeapPos>,
 
}
 

	
 
impl Store {
 
    pub(crate) fn new() -> Self {
 
        let mut store = Self{
 
            stack: Vec::with_capacity(64),
 
            cur_stack_boundary: 0,
 
            heap_regions: Vec::new(),
 
            free_regions: VecDeque::new(),
 
        };
 

	
 
        store.stack.push(Value::PrevStackBoundary(-1));
 
        store
 
    }
 

	
 
    /// Resizes(!) the stack to fit the required number of values. Any
 
    /// unallocated slots are initialized to `Unassigned`. The specified stack
 
    /// index is exclusive.
 
    pub(crate) fn reserve_stack(&mut self, unique_stack_idx: usize) {
 
        let new_size = self.cur_stack_boundary + unique_stack_idx + 1;
 
        if new_size > self.stack.len() {
 
            self.stack.resize(new_size, Value::Unassigned);
 
        }
 
    }
 

	
 
    /// Clears values on the stack and removes their heap allocations when
 
    /// applicable. The specified index itself will also be cleared (so if you
 
    /// specify 0 all values in the frame will be destroyed)
 
    pub(crate) fn clear_stack(&mut self, unique_stack_idx: usize) {
 
        let new_size = self.cur_stack_boundary + unique_stack_idx + 1;
 
        for idx in new_size..self.stack.len() {
 
            self.drop_value(self.stack[idx].get_heap_pos());
 
        }
 
        self.stack.truncate(new_size);
 
    }
 

	
 
    /// Reads a value and takes ownership. This is different from a move because
 
    /// the value might indirectly reference stack/heap values. For these kinds
 
    /// of values we will actually return a cloned value.
 
    pub(crate) fn read_take_ownership(&mut self, value: Value) -> Value {
 
        match value {
 
            Value::Ref(ValueId::Stack(pos)) => {
 
                let abs_pos = self.cur_stack_boundary + 1 + pos as usize;
 
                return self.clone_value(self.stack[abs_pos].clone());
 
            },
 
            Value::Ref(ValueId::Heap(heap_pos, value_idx)) => {
 
                let heap_pos = heap_pos as usize;
 
                let value_idx = value_idx as usize;
 
                return self.clone_value(self.heap_regions[heap_pos].values[value_idx].clone());
 
            },
 
            _ => value
 
        }
 
    }
 

	
 
    /// Reads a value from a specific address. The value is always copied, hence
 
    /// if the value ends up not being written, one should call `drop_value` on
 
    /// it.
 
    pub(crate) fn read_copy(&mut self, address: ValueId) -> Value {
 
        match address {
 
            ValueId::Stack(pos) => {
 
                let cur_pos = self.cur_stack_boundary + 1 + pos as usize;
 
                return self.clone_value(self.stack[cur_pos].clone());
 
            },
 
            ValueId::Heap(heap_pos, region_idx) => {
 
                return self.clone_value(self.heap_regions[heap_pos as usize].values[region_idx as usize].clone())
 
            }
 
        }
 
    }
 

	
 
    /// Potentially reads a reference value. The supplied `Value` might not
 
    /// actually live in the store's stack or heap, but live on the expression
 
    /// stack. Generally speaking you only want to call this if the value comes
 
    /// from the expression stack due to borrowing issues.
 
    pub(crate) fn maybe_read_ref<'a>(&'a self, value: &'a Value) -> &'a Value {
 
        match value {
 
            Value::Ref(value_id) => self.read_ref(*value_id),
 
            _ => value,
 
        }
 
    }
 

	
 
    /// Returns an immutable reference to the value pointed to by an address
 
    pub(crate) fn read_ref(&self, address: ValueId) -> &Value {
 
        match address {
 
            ValueId::Stack(pos) => {
 
                let cur_pos = self.cur_stack_boundary + 1 + pos as usize;
 
                return &self.stack[cur_pos];
 
            },
 
            ValueId::Heap(heap_pos, region_idx) => {
 
                return &self.heap_regions[heap_pos as usize].values[region_idx as usize];
 
            }
 
        }
 
    }
 

	
 
    /// Returns a mutable reference to the value pointed to by an address
 
    pub(crate) fn read_mut_ref(&mut self, address: ValueId) -> &mut Value {
 
        match address {
 
            ValueId::Stack(pos) => {
 
                let cur_pos = self.cur_stack_boundary + 1 + pos as usize;
 
                return &mut self.stack[cur_pos];
 
            },
 
            ValueId::Heap(heap_pos, region_idx) => {
 
                return &mut self.heap_regions[heap_pos as usize].values[region_idx as usize];
 
            }
 
        }
 
    }
 

	
 
    /// Writes a value
 
    pub(crate) fn write(&mut self, address: ValueId, value: Value) {
 
        match address {
 
            ValueId::Stack(pos) => {
 
                let cur_pos = self.cur_stack_boundary + 1 + pos as usize;
 
                self.drop_value(self.stack[cur_pos].get_heap_pos());
 
                self.stack[cur_pos] = value;
 
            },
 
            ValueId::Heap(heap_pos, region_idx) => {
 
                let heap_pos = heap_pos as usize;
 
                let region_idx = region_idx as usize;
 
                self.drop_value(self.heap_regions[heap_pos].values[region_idx].get_heap_pos());
 
                self.heap_regions[heap_pos].values[region_idx] = value
 
            }
 
        }
 
    }
 

	
 
    /// Takes a `Value` by value (to work around borrowing issues); it is either
    /// a direct value or a value containing an index into a heap region. The
    /// caller should treat the argument as a reference (i.e. don't call
    /// `drop_value(thing)` after calling `clone_value(thing.clone())`).
    pub(crate) fn clone_value(&mut self, value: Value) -> Value {
 
        // Quickly check if the value is not on the heap
 
        let source_heap_pos = value.get_heap_pos();
 
        println!("DEBUG: Cloning {:?}", value);
 
        if source_heap_pos.is_none() {
 
            // We can do a trivial copy, unless we're dealing with a value
 
            // reference
 
            return match value {
 
                Value::Ref(ValueId::Stack(stack_pos)) => {
 
                    let abs_stack_pos = self.cur_stack_boundary + stack_pos as usize + 1;
 
                    self.clone_value(self.stack[abs_stack_pos].clone())
 
                },
 
                Value::Ref(ValueId::Heap(heap_pos, val_idx)) => {
 
                    self.clone_value(self.heap_regions[heap_pos as usize].values[val_idx as usize].clone())
 
                },
 
                _ => value,
 
            };
 
        }
 

	
 
        // Value does live on heap, copy it
 
        let source_heap_pos = source_heap_pos.unwrap() as usize;
 
        let target_heap_pos = self.alloc_heap();
 
        let target_heap_pos_usize = target_heap_pos as usize;
 

	
 
        let num_values = self.heap_regions[source_heap_pos].values.len();
 
        for value_idx in 0..num_values {
 
            let cloned = self.clone_value(self.heap_regions[source_heap_pos].values[value_idx].clone());
 
            self.heap_regions[target_heap_pos_usize].values.push(cloned);
 
        }
 

	
 
        match value {
 
            Value::Message(_) => Value::Message(target_heap_pos),
 
            Value::Array(_) => Value::Array(target_heap_pos),
 
            Value::Union(tag, _) => Value::Union(tag, target_heap_pos),
 
            Value::Struct(_) => Value::Struct(target_heap_pos),
 
            _ => unreachable!("performed clone_value on heap, but {:?} is not a heap value", value),
 
        }
 
    }
 

	
 
    pub(crate) fn drop_value(&mut self, value: Option<HeapPos>) {
 
        if let Some(heap_pos) = value {
 
            self.drop_heap_pos(heap_pos);
 
        }
 
    }
 

	
 
    pub(crate) fn drop_heap_pos(&mut self, heap_pos: HeapPos) {
 
        let num_values = self.heap_regions[heap_pos as usize].values.len();
 
        for value_idx in 0..num_values {
 
            if let Some(other_heap_pos) = self.heap_regions[heap_pos as usize].values[value_idx].get_heap_pos() {
 
                self.drop_heap_pos(other_heap_pos);
 
            }
 
        }
 

	
 
        self.heap_regions[heap_pos as usize].values.clear();
 
        self.free_regions.push_back(heap_pos);
 
    }
 

	
 
    pub(crate) fn alloc_heap(&mut self) -> HeapPos {
 
        if self.free_regions.is_empty() {
 
            let idx = self.heap_regions.len() as HeapPos;
 
            self.heap_regions.push(HeapAllocation{ values: Vec::new() });
 
            return idx;
 
        } else {
 
            let idx = self.free_regions.pop_back().unwrap();
 
            return idx;
 
        }
 
    }
 
}
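
// A minimal usage sketch of the store API above (illustration only, exercising
// the pub(crate) interface; it is not part of the evaluator's own call path).
#[cfg(test)]
mod store_usage_sketch {
    use super::Store;
    use super::super::value::{Value, ValueId};

    #[test]
    fn stack_and_heap_round_trip() {
        let mut store = Store::new();

        // Two stack slots for the current frame (relative indices 0 and 1).
        store.reserve_stack(2);
        store.write(ValueId::Stack(0), Value::UInt32(5));
        assert_eq!(store.read_ref(ValueId::Stack(0)).as_uint32(), 5);

        // A heap region referenced from stack slot 1, e.g. an array of bytes.
        let region = store.alloc_heap();
        store.heap_regions[region as usize].values.push(Value::UInt8(1));
        store.write(ValueId::Stack(1), Value::Array(region));

        // Clearing the frame also returns the referenced region to the free list.
        store.clear_stack(0);
        assert_eq!(store.free_regions.len(), 1);
    }
}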
 
\ No newline at end of file
src/protocol/eval/value.rs
 

	
 
use super::store::*;
 
use crate::PortId;
 
use crate::protocol::ast::{
 
    AssignmentOperator,
 
    BinaryOperator,
 
    UnaryOperator,
 
};
 

	
 
pub type StackPos = u32;
 
pub type HeapPos = u32;
 

	
 
#[derive(Debug, Copy, Clone)]
 
pub enum ValueId {
 
    Stack(StackPos), // place on stack
 
    Heap(HeapPos, u32), // allocated region + values within that region
 
}
 

	
 
/// Represents a value stored on the stack or on the heap. Some values contain
 
/// a `HeapPos`, implying that they're stored in the store's `Heap`. Clearing
 
/// a `Value` with a `HeapPos` from a stack must also clear the associated
 
/// region from the `Heap`.
 
#[derive(Debug, Clone)]
 
pub enum Value {
 
    // Special types, never encountered during evaluation if the compiler works correctly
 
    Unassigned,                 // Marker when variables are first declared, immediately followed by assignment
 
    PrevStackBoundary(isize),   // Marker for stack frame beginning, so we can pop stack values
 
    Ref(ValueId),               // Reference to a value, used by expressions producing references
 
    // Builtin types
 
    Input(PortId),
 
    Output(PortId),
 
    Message(HeapPos),
 
    Null,
 
    Bool(bool),
 
    Char(char),
 
    String(HeapPos),
 
    UInt8(u8),
 
    UInt16(u16),
 
    UInt32(u32),
 
    UInt64(u64),
 
    SInt8(i8),
 
    SInt16(i16),
 
    SInt32(i32),
 
    SInt64(i64),
 
    Array(HeapPos),
 
    // Instances of user-defined types
 
    Enum(i64),
 
    Union(i64, HeapPos),
 
    Struct(HeapPos),
 
}
 

	
 
macro_rules! impl_union_unpack_as_value {
 
    ($func_name:ident, $variant_name:path, $return_type:ty) => {
 
        impl Value {
 
            pub(crate) fn $func_name(&self) -> $return_type {
 
                match self {
 
                    $variant_name(v) => *v,
 
                    _ => panic!(concat!("called ", stringify!($func_name()), " on {:?}"), self),
 
                }
 
            }
 
        }
 
    }
 
}
 

	
 
impl_union_unpack_as_value!(as_stack_boundary, Value::PrevStackBoundary, isize);
 
impl_union_unpack_as_value!(as_ref,     Value::Ref,     ValueId);
 
impl_union_unpack_as_value!(as_input,   Value::Input,   PortId);
 
impl_union_unpack_as_value!(as_output,  Value::Output,  PortId);
 
impl_union_unpack_as_value!(as_message, Value::Message, HeapPos);
 
impl_union_unpack_as_value!(as_bool,    Value::Bool,    bool);
 
impl_union_unpack_as_value!(as_char,    Value::Char,    char);
 
impl_union_unpack_as_value!(as_string,  Value::String,  HeapPos);
 
impl_union_unpack_as_value!(as_uint8,   Value::UInt8,   u8);
 
impl_union_unpack_as_value!(as_uint16,  Value::UInt16,  u16);
 
impl_union_unpack_as_value!(as_uint32,  Value::UInt32,  u32);
 
impl_union_unpack_as_value!(as_uint64,  Value::UInt64,  u64);
 
impl_union_unpack_as_value!(as_sint8,   Value::SInt8,   i8);
 
impl_union_unpack_as_value!(as_sint16,  Value::SInt16,  i16);
 
impl_union_unpack_as_value!(as_sint32,  Value::SInt32,  i32);
 
impl_union_unpack_as_value!(as_sint64,  Value::SInt64,  i64);
 
impl_union_unpack_as_value!(as_array,   Value::Array,   HeapPos);
 
impl_union_unpack_as_value!(as_enum,    Value::Enum,    i64);
 
impl_union_unpack_as_value!(as_struct,  Value::Struct,  HeapPos);
 

	
 
impl Value {
 
    pub(crate) fn as_union(&self) -> (i64, HeapPos) {
 
        match self {
 
            Value::Union(tag, v) => (*tag, *v),
 
            _ => panic!("called as_union on {:?}", self),
 
        }
 
    }
 

	
 
    pub(crate) fn is_integer(&self) -> bool {
 
        match self {
 
            Value::UInt8(_) | Value::UInt16(_) | Value::UInt32(_) | Value::UInt64(_) |
 
            Value::SInt8(_) | Value::SInt16(_) | Value::SInt32(_) | Value::SInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn is_unsigned_integer(&self) -> bool {
 
        match self {
 
            Value::UInt8(_) | Value::UInt16(_) | Value::UInt32(_) | Value::UInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn is_signed_integer(&self) -> bool {
 
        match self {
 
            Value::SInt8(_) | Value::SInt16(_) | Value::SInt32(_) | Value::SInt64(_) => true,
 
            _ => false
 
        }
 
    }
 

	
 
    pub(crate) fn as_unsigned_integer(&self) -> u64 {
 
        match self {
 
            Value::UInt8(v)  => *v as u64,
 
            Value::UInt16(v) => *v as u64,
 
            Value::UInt32(v) => *v as u64,
 
            Value::UInt64(v) => *v as u64,
 
            _ => unreachable!("called as_unsigned_integer on {:?}", self),
 
        }
 
    }
 

	
 
    pub(crate) fn as_signed_integer(&self) -> i64 {
 
        match self {
 
            Value::SInt8(v)  => *v as i64,
 
            Value::SInt16(v) => *v as i64,
 
            Value::SInt32(v) => *v as i64,
 
            Value::SInt64(v) => *v as i64,
 
            _ => unreachable!("called as_signed_integer on {:?}", self)
 
        }
 
    }
 

	
 
    /// Returns the heap position associated with the value. If the value
 
    /// doesn't store anything in the heap then we return `None`.
 
    pub(crate) fn get_heap_pos(&self) -> Option<HeapPos> {
 
        match self {
 
            Value::Message(v) => Some(*v),
 
            Value::Array(v) => Some(*v),
 
            Value::Union(_, v) => Some(*v),
 
            Value::Struct(v) => Some(*v),
 
            _ => None
 
        }
 
    }
 
}
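
// Small illustration of the accessors above: the `as_*` helpers panic on a
// mismatched variant, while `get_heap_pos` is the non-panicking way to ask
// whether a value owns a heap region (illustration only).
#[cfg(test)]
mod value_accessor_sketch {
    use super::Value;

    #[test]
    fn unpack_and_heap_pos() {
        assert_eq!(Value::UInt16(12).as_uint16(), 12);
        assert_eq!(Value::Union(3, 7).as_union(), (3, 7));
        assert_eq!(Value::Array(0).get_heap_pos(), Some(0));
        assert!(Value::Bool(true).get_heap_pos().is_none());
    }
}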
 

	
 
/// When providing arguments to a new component, or when transferring values
 
/// from one component's store to a newly instantiated component, one has to
 
/// transfer stack and heap values. This `ValueGroup` represents such a
 
/// temporary group of values with potential heap allocations.
 
///
 
/// Constructing such a ValueGroup manually requires some extra care to make
 
/// sure all elements of `values` point to valid elements of `regions`.
 
///
 
/// Again: this is a temporary thing, hopefully removed once we move to a
 
/// bytecode interpreter.
 
pub struct ValueGroup {
 
    pub(crate) values: Vec<Value>,
 
    pub(crate) regions: Vec<Vec<Value>>
 
}
 

	
 
impl ValueGroup {
 
    pub(crate) fn new_stack(values: Vec<Value>) -> Self {
 
        debug_assert!(values.iter().all(|v| v.get_heap_pos().is_none()));
 
        Self{
 
            values,
 
            regions: Vec::new(),
 
        }
 
    }
 
    pub(crate) fn from_store(store: &Store, values: &[Value]) -> Self {
 
        let mut group = ValueGroup{
 
            values: Vec::with_capacity(values.len()),
 
            regions: Vec::with_capacity(values.len()), // estimation
 
        };
 

	
 
        for value in values {
 
            let transferred = group.retrieve_value(value, store);
 
            group.values.push(transferred);
 
        }
 

	
 
        group
 
    }
 

	
 
    /// Transfers a provided value from a store into a local value with its
 
    /// heap allocations (if any) stored in the ValueGroup. Calling this
 
    /// function will not store the returned value in the `values` member.
 
    fn retrieve_value(&mut self, value: &Value, from_store: &Store) -> Value {
 
        if let Some(heap_pos) = value.get_heap_pos() {
 
            // Value points to a heap allocation, so transfer the heap values
 
            // internally.
 
            let from_region = &from_store.heap_regions[heap_pos as usize].values;
 
            let mut new_region = Vec::with_capacity(from_region.len());
 
            for value in from_region {
 
                let transferred = self.retrieve_value(value, from_store);
 
                new_region.push(transferred);
 
            }
 

	
 
            // Region is constructed, store internally and return the new value.
 
            let new_region_idx = self.regions.len() as HeapPos;
 
            self.regions.push(new_region);
 

	
 
            return match value {
 
                Value::Message(_)    => Value::Message(new_region_idx),
 
                Value::String(_)     => Value::String(new_region_idx),
 
                Value::Array(_)      => Value::Array(new_region_idx),
 
                Value::Union(tag, _) => Value::Union(*tag, new_region_idx),
 
                Value::Struct(_)     => Value::Struct(new_region_idx),
 
                _ => unreachable!(),
 
            };
 
        } else {
 
            return value.clone();
 
        }
 
    }
 

	
 
    /// Transfers the heap values and the stack values into the store. Stack
 
    /// values are pushed onto the Store's stack in the order in which they
 
    /// appear in the value group.
 
    pub(crate) fn into_store(self, store: &mut Store) {
 
        for value in &self.values {
 
            let transferred = self.provide_value(value, store);
 
            store.stack.push(transferred);
 
        }
 
    }
 

	
 
    fn provide_value(&self, value: &Value, to_store: &mut Store) -> Value {
 
        if let Some(from_heap_pos) = value.get_heap_pos() {
 
            let from_heap_pos = from_heap_pos as usize;
 
            let to_heap_pos = to_store.alloc_heap();
 
            let to_heap_pos_usize = to_heap_pos as usize;
 
            to_store.heap_regions[to_heap_pos_usize].values.reserve(self.regions[from_heap_pos].len());
 

	
 
            for value in &self.regions[from_heap_pos as usize] {
 
                let transferred = self.provide_value(value, to_store);
 
                to_store.heap_regions[to_heap_pos_usize].values.push(transferred);
 
            }
 

	
 
            return match value {
 
                Value::Message(_)    => Value::Message(to_heap_pos),
 
                Value::String(_)     => Value::String(to_heap_pos),
 
                Value::Array(_)      => Value::Array(to_heap_pos),
 
                Value::Union(tag, _) => Value::Union(*tag, to_heap_pos),
 
                Value::Struct(_)     => Value::Struct(to_heap_pos),
 
                _ => unreachable!(),
 
            };
 
        } else {
 
            return value.clone();
 
        }
 
    }
 
}
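
// Illustration of the transfer described above: detach a heap-backed value
// from one store into a ValueGroup, then re-attach it to a fresh store
// (illustration only, not part of the runtime's call path).
#[cfg(test)]
mod value_group_sketch {
    use super::{Value, ValueGroup};
    use super::super::store::Store;

    #[test]
    fn transfer_array_between_stores() {
        // Source store with one heap region holding two bytes.
        let mut from = Store::new();
        let region = from.alloc_heap();
        from.heap_regions[region as usize].values.push(Value::UInt8(1));
        from.heap_regions[region as usize].values.push(Value::UInt8(2));

        // Detach the value plus its heap region into a temporary group ...
        let group = ValueGroup::from_store(&from, &[Value::Array(region)]);

        // ... and re-attach it to another store's stack and heap.
        let mut to = Store::new();
        group.into_store(&mut to);

        let transferred = to.stack.last().unwrap().as_array();
        assert_eq!(to.heap_regions[transferred as usize].values.len(), 2);
    }
}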
 

	
 
impl Default for ValueGroup {
 
    /// Returns an empty ValueGroup
 
    fn default() -> Self {
 
        Self { values: Vec::new(), regions: Vec::new() }
 
    }
 
}
 

	
 
pub(crate) fn apply_assignment_operator(store: &mut Store, lhs: ValueId, op: AssignmentOperator, rhs: Value) {
 
    use AssignmentOperator as AO;
 

	
 
    macro_rules! apply_int_op {
 
        ($lhs:ident, $assignment_tokens:tt, $operator:ident, $rhs:ident) => {
 
            match $lhs {
 
                Value::UInt8(v)  => { *v $assignment_tokens $rhs.as_uint8();  },
 
                Value::UInt16(v) => { *v $assignment_tokens $rhs.as_uint16(); },
 
                Value::UInt32(v) => { *v $assignment_tokens $rhs.as_uint32(); },
 
                Value::UInt64(v) => { *v $assignment_tokens $rhs.as_uint64(); },
 
                Value::SInt8(v)  => { *v $assignment_tokens $rhs.as_sint8();  },
 
                Value::SInt16(v) => { *v $assignment_tokens $rhs.as_sint16(); },
 
                Value::SInt32(v) => { *v $assignment_tokens $rhs.as_sint32(); },
 
                Value::SInt64(v) => { *v $assignment_tokens $rhs.as_sint64(); },
 
                _ => unreachable!("apply_assignment_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs),
 
            }
 
        }
 
    }
 

	
 
    // let rhs = store.maybe_read_ref(&rhs).clone(); // we don't own this thing, so don't drop it
 
    let lhs = store.read_mut_ref(lhs);
 

	
 
    let mut to_dealloc = None;
 
    match op {
 
        AO::Set => {
 
            match lhs {
 
                Value::Unassigned => { *lhs = rhs; },
 
                Value::Input(v)  => { *v = rhs.as_input(); },
 
                Value::Output(v) => { *v = rhs.as_output(); },
 
                Value::Message(v)  => { to_dealloc = Some(*v); *v = rhs.as_message(); },
 
                Value::Bool(v)    => { *v = rhs.as_bool(); },
 
                Value::Char(v) => { *v = rhs.as_char(); },
 
                Value::String(v) => { *v = rhs.as_string().clone(); },
 
                Value::UInt8(v) => { *v = rhs.as_uint8(); },
 
                Value::UInt16(v) => { *v = rhs.as_uint16(); },
 
                Value::UInt32(v) => { *v = rhs.as_uint32(); },
 
                Value::UInt64(v) => { *v = rhs.as_uint64(); },
 
                Value::SInt8(v) => { *v = rhs.as_sint8(); },
 
                Value::SInt16(v) => { *v = rhs.as_sint16(); },
 
                Value::SInt32(v) => { *v = rhs.as_sint32(); },
 
                Value::SInt64(v) => { *v = rhs.as_sint64(); },
 
                Value::Array(v) => { to_dealloc = Some(*v); *v = rhs.as_array(); },
 
                Value::Enum(v) => { *v = rhs.as_enum(); },
 
                Value::Union(lhs_tag, lhs_heap_pos) => {
 
                    to_dealloc = Some(*lhs_heap_pos);
 
                    let (rhs_tag, rhs_heap_pos) = rhs.as_union();
 
                    *lhs_tag = rhs_tag;
 
                    *lhs_heap_pos = rhs_heap_pos;
 
                }
 
                Value::Struct(v) => { to_dealloc = Some(*v); *v = rhs.as_struct(); },
 
                _ => unreachable!("apply_assignment_operator {:?} on lhs {:?} and rhs {:?}", op, lhs, rhs),
 
            }
 
        },
 
        AO::Multiplied =>   { apply_int_op!(lhs, *=,  op, rhs) },
 
        AO::Divided =>      { apply_int_op!(lhs, /=,  op, rhs) },
 
        AO::Remained =>     { apply_int_op!(lhs, %=,  op, rhs) },
 
        AO::Added =>        { apply_int_op!(lhs, +=,  op, rhs) },
 
        AO::Subtracted =>   { apply_int_op!(lhs, -=,  op, rhs) },
 
        AO::ShiftedLeft =>  { apply_int_op!(lhs, <<=, op, rhs) },
 
        AO::ShiftedRight => { apply_int_op!(lhs, >>=, op, rhs) },
 
        AO::BitwiseAnded => { apply_int_op!(lhs, &=,  op, rhs) },
 
        AO::BitwiseXored => { apply_int_op!(lhs, ^=,  op, rhs) },
 
        AO::BitwiseOred =>  { apply_int_op!(lhs, |=,  op, rhs) },
 
    }
 

	
 
    if let Some(heap_pos) = to_dealloc {
 
        store.drop_heap_pos(heap_pos);
 
    }
 
}
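
// Illustration of `apply_assignment_operator`: a plain `Set` on an unassigned
// slot followed by a compound assignment on the same slot (illustration only).
#[cfg(test)]
mod assignment_operator_sketch {
    use super::{apply_assignment_operator, Value, ValueId};
    use super::super::store::Store;
    use crate::protocol::ast::AssignmentOperator as AO;

    #[test]
    fn set_then_add() {
        let mut store = Store::new();
        store.reserve_stack(1);

        let slot = ValueId::Stack(0);
        apply_assignment_operator(&mut store, slot, AO::Set, Value::UInt32(40));
        apply_assignment_operator(&mut store, slot, AO::Added, Value::UInt32(2));

        assert_eq!(store.read_ref(slot).as_uint32(), 42);
    }
}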
 

	
 
pub(crate) fn apply_binary_operator(store: &mut Store, lhs: &Value, op: BinaryOperator, rhs: &Value) -> Value {
 
    use BinaryOperator as BO;
 

	
 
    macro_rules! apply_int_op_and_return_self {
 
        ($lhs:ident, $operator_tokens:tt, $operator:ident, $rhs:ident) => {
 
            return match $lhs {
 
                Value::UInt8(v)  => { Value::UInt8( *v $operator_tokens $rhs.as_uint8() ) },
 
                Value::UInt16(v) => { Value::UInt16(*v $operator_tokens $rhs.as_uint16()) },
 
                Value::UInt32(v) => { Value::UInt32(*v $operator_tokens $rhs.as_uint32()) },
 
                Value::UInt64(v) => { Value::UInt64(*v $operator_tokens $rhs.as_uint64()) },
 
                Value::SInt8(v)  => { Value::SInt8( *v $operator_tokens $rhs.as_sint8() ) },
 
                Value::SInt16(v) => { Value::SInt16(*v $operator_tokens $rhs.as_sint16()) },
 
                Value::SInt32(v) => { Value::SInt32(*v $operator_tokens $rhs.as_sint32()) },
 
                Value::SInt64(v) => { Value::SInt64(*v $operator_tokens $rhs.as_sint64()) },
 
                _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs)
 
            };
 
        }
 
    }
 

	
 
    macro_rules! apply_int_op_and_return_bool {
 
        ($lhs:ident, $operator_tokens:tt, $operator:ident, $rhs:ident) => {
 
            return match $lhs {
 
                Value::UInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_uint8() ) },
 
                Value::UInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint16()) },
 
                Value::UInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint32()) },
 
                Value::UInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint64()) },
 
                Value::SInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_sint8() ) },
 
                Value::SInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint16()) },
 
                Value::SInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint32()) },
 
                Value::SInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint64()) },
 
                _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs)
 
            };
 
        }
 
    }
 

	
 
    // We need to handle concatenate in a special way because it needs the store
 
    // mutably.
 
    if op == BO::Concatenate {
 
        let target_heap_pos = store.alloc_heap();
 
        let lhs_heap_pos;
 
        let rhs_heap_pos;
 

	
 
        let lhs = store.maybe_read_ref(lhs);
 
        let rhs = store.maybe_read_ref(rhs);
 

	
 
        enum ValueKind { Message, String, Array };
 
        let value_kind;
 

	
 
        match lhs {
 
            Value::Message(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_message();
 
                value_kind = ValueKind::Message;
 
            },
 
            Value::String(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_string();
 
                value_kind = ValueKind::String;
 
            },
 
            Value::Array(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_array();
 
                value_kind = ValueKind::Array;
 
            },
 
            _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", op, lhs, rhs)
 
        }
 

	
 
        let lhs_heap_pos = lhs_heap_pos as usize;
 
        let rhs_heap_pos = rhs_heap_pos as usize;
 

	
 
        // TODO: I hate this, but fine...
        let mut concatenated = Vec::new();
 
        let lhs_len = store.heap_regions[lhs_heap_pos].values.len();
 
        let rhs_len = store.heap_regions[rhs_heap_pos].values.len();
 
        concatenated.reserve(lhs_len + rhs_len);
 
        for idx in 0..lhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[lhs_heap_pos].values[idx].clone()));
 
        }
 
        for idx in 0..rhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[rhs_heap_pos].values[idx].clone()));
 
        }
 

	
 
        store.heap_regions[target_heap_pos as usize].values = concatenated;
 

	
 
        return match value_kind{
 
            ValueKind::Message => Value::Message(target_heap_pos),
 
            ValueKind::String => Value::String(target_heap_pos),
 
            ValueKind::Array => Value::Array(target_heap_pos),
 
        };
 
    }
 

	
 
    // If any of the values are references, retrieve the thing they're referring
 
    // to.
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    match op {
 
        BO::Concatenate => unreachable!(),
 
        BO::LogicalOr => {
 
            return Value::Bool(lhs.as_bool() || rhs.as_bool());
 
        },
 
        BO::LogicalAnd => {
 
            return Value::Bool(lhs.as_bool() && rhs.as_bool());
 
        },
 
        BO::BitwiseOr        => { apply_int_op_and_return_self!(lhs, |,  op, rhs); },
 
        BO::BitwiseXor       => { apply_int_op_and_return_self!(lhs, ^,  op, rhs); },
 
        BO::BitwiseAnd       => { apply_int_op_and_return_self!(lhs, &,  op, rhs); },
 
        BO::Equality         => { Value::Bool(apply_equality_operator(store, lhs, rhs)) },
 
        BO::Inequality       => { Value::Bool(apply_inequality_operator(store, lhs, rhs)) },
 
        BO::LessThan         => { apply_int_op_and_return_bool!(lhs, <,  op, rhs); },
 
        BO::GreaterThan      => { apply_int_op_and_return_bool!(lhs, >,  op, rhs); },
 
        BO::LessThanEqual    => { apply_int_op_and_return_bool!(lhs, <=, op, rhs); },
 
        BO::GreaterThanEqual => { apply_int_op_and_return_bool!(lhs, >=, op, rhs); },
 
        BO::ShiftLeft        => { apply_int_op_and_return_self!(lhs, <<, op, rhs); },
 
        BO::ShiftRight       => { apply_int_op_and_return_self!(lhs, >>, op, rhs); },
 
        BO::Add              => { apply_int_op_and_return_self!(lhs, +,  op, rhs); },
 
        BO::Subtract         => { apply_int_op_and_return_self!(lhs, -,  op, rhs); },
 
        BO::Multiply         => { apply_int_op_and_return_self!(lhs, *,  op, rhs); },
 
        BO::Divide           => { apply_int_op_and_return_self!(lhs, /,  op, rhs); },
 
        BO::Remainder        => { apply_int_op_and_return_self!(lhs, %,  op, rhs); }
 
    }
 
}
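
// Illustration of `apply_binary_operator`: arithmetic keeps the operand type,
// comparisons yield a `Bool` (illustration only).
#[cfg(test)]
mod binary_operator_sketch {
    use super::{apply_binary_operator, Value};
    use super::super::store::Store;
    use crate::protocol::ast::BinaryOperator as BO;

    #[test]
    fn add_and_compare() {
        let mut store = Store::new();

        let sum = apply_binary_operator(&mut store, &Value::UInt8(2), BO::Add, &Value::UInt8(3));
        assert_eq!(sum.as_uint8(), 5);

        let less = apply_binary_operator(&mut store, &Value::SInt32(1), BO::LessThan, &Value::SInt32(2));
        assert!(less.as_bool());
    }
}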
 

	
 
pub(crate) fn apply_unary_operator(store: &mut Store, op: UnaryOperator, value: &Value) -> Value {
 
    use UnaryOperator as UO;
 

	
 
    macro_rules! apply_int_expr_and_return {
 
        ($value:ident, $apply:tt, $op:ident) => {
 
            return match $value {
 
                Value::UInt8(v)  => Value::UInt8($apply *v),
 
                Value::UInt16(v) => Value::UInt16($apply *v),
 
                Value::UInt32(v) => Value::UInt32($apply *v),
 
                Value::UInt64(v) => Value::UInt64($apply *v),
 
                Value::SInt8(v)  => Value::SInt8($apply *v),
 
                Value::SInt16(v) => Value::SInt16($apply *v),
 
                Value::SInt32(v) => Value::SInt32($apply *v),
 
                Value::SInt64(v) => Value::SInt64($apply *v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", $op, $value),
 
            };
 
        }
 
    }
 

	
 
    // If the value is a reference, retrieve the thing it is referring to
 
    let value = store.maybe_read_ref(value);
 

	
 
    match op {
 
        UO::Positive => {
 
            debug_assert!(value.is_integer());
 
            return value.clone();
 
        },
 
        UO::Negative => {
 
            // TODO: Error on negating unsigned integers
 
            return match value {
 
                Value::SInt8(v) => Value::SInt8(-*v),
 
                Value::SInt16(v) => Value::SInt16(-*v),
 
                Value::SInt32(v) => Value::SInt32(-*v),
 
                Value::SInt64(v) => Value::SInt64(-*v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", op, value),
 
            }
 
        },
 
        UO::BitwiseNot => { apply_int_expr_and_return!(value, !, op)},
 
        UO::LogicalNot => { return Value::Bool(!value.as_bool()); },
 
        UO::PreIncrement => { todo!("implement") },
 
        UO::PreDecrement => { todo!("implement") },
 
        UO::PostIncrement => { todo!("implement") },
 
        UO::PostDecrement => { todo!("implement") },
 
    }
 
}
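
// Illustration of `apply_unary_operator`: negation is only defined for signed
// integers here, `LogicalNot` expects a `Bool` (illustration only).
#[cfg(test)]
mod unary_operator_sketch {
    use super::{apply_unary_operator, Value};
    use super::super::store::Store;
    use crate::protocol::ast::UnaryOperator as UO;

    #[test]
    fn negate_and_logical_not() {
        let mut store = Store::new();
        assert_eq!(apply_unary_operator(&mut store, UO::Negative, &Value::SInt16(7)).as_sint16(), -7);
        assert!(apply_unary_operator(&mut store, UO::LogicalNot, &Value::Bool(false)).as_bool());
    }
}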
 

	
 
pub(crate) fn apply_equality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_equality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return false;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if !apply_equality_operator(store, lhs_val, rhs_val) {
 
                return false;
 
            }
 
        }
 

	
 
        return true;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v == rhs.as_input(),
 
        Value::Output(v) => *v == rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v == rhs.as_bool(),
 
        Value::Char(v) => *v == rhs.as_char(),
 
        Value::String(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v == rhs.as_uint8(),
 
        Value::UInt16(v) => *v == rhs.as_uint16(),
 
        Value::UInt32(v) => *v == rhs.as_uint32(),
 
        Value::UInt64(v) => *v == rhs.as_uint64(),
 
        Value::SInt8(v) => *v == rhs.as_sint8(),
 
        Value::SInt16(v) => *v == rhs.as_sint16(),
 
        Value::SInt32(v) => *v == rhs.as_sint32(),
 
        Value::SInt64(v) => *v == rhs.as_sint64(),
 
        Value::Enum(v) => *v == rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return false;
 
            }
 
            eval_equality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_equality_operator to lhs {:?}", lhs),
 
    }
 
}
 

	
 
pub(crate) fn apply_inequality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_inequality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return true;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if apply_inequality_operator(store, lhs_val, rhs_val) {
 
                return true;
 
            }
 
        }
 

	
 
        return false;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v != rhs.as_input(),
 
        Value::Output(v) => *v != rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v != rhs.as_bool(),
 
        Value::Char(v) => *v != rhs.as_char(),
 
        Value::String(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v != rhs.as_uint8(),
 
        Value::UInt16(v) => *v != rhs.as_uint16(),
 
        Value::UInt32(v) => *v != rhs.as_uint32(),
 
        Value::UInt64(v) => *v != rhs.as_uint64(),
 
        Value::SInt8(v) => *v != rhs.as_sint8(),
 
        Value::SInt16(v) => *v != rhs.as_sint16(),
 
        Value::SInt32(v) => *v != rhs.as_sint32(),
 
        Value::SInt64(v) => *v != rhs.as_sint64(),
 
        Value::Enum(v) => *v != rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return true;
 
            }
 
            eval_inequality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_inequality_operator to lhs {:?}", lhs)
 
    }
 
}
 
\ No newline at end of file
src/protocol/parser/pass_typing.rs
 
/// pass_typing
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure and insert and determine its initial type based on context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter case we return an
 
/// error.
 
///
 
/// Inference may be applied on non-polymorphic procedures and on polymorphic
 
/// procedures. When dealing with a non-polymorphic procedure we apply the type
 
/// resolver and annotate the AST with the `ConcreteType`s. When dealing with
 
/// polymorphic procedures we will only annotate the AST once, preserving
 
/// references to polymorphic variables. Any later pass will perform just the
 
/// type checking.
 
///
 
/// TODO: Needs a thorough rewrite:
 
///  0. polymorph_progress is intentionally broken at the moment.
 
///  1. For polymorphic type inference we need to have an extra datastructure
 
///     for progressing the polymorphic variables and mapping them back to each
 
///     signature type that uses that polymorphic type. The two types of markers
 
///     became somewhat of a mess.
 
///  2. We're doing a lot of extra work. It seems better to apply the initial
 
///     type based on expression parents, then to apply forced constraints (arg
 
///     to a fires() call must be port-like), only then to start progressing the
 
///     types.
 
///     Furthermore, queueing of expressions can be more intelligent; currently
 
///     every child/parent of an expression is inferred again when queued. Hence
 
///     we need to queue only specific children/parents of expressions.
 
///  3. Remove the `msg` type?
 
///  4. Disallow certain types in certain operations (e.g. `Void`).
 
///  5. Implement implicit and explicit casting.
 
///  6. Investigate different ways of performing the type-on-type inference,
 
///     maybe there is a better way than flattened trees + markers?
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "types", $format, $($args),*);
 
    };
 
}
 

	
 
use std::collections::{HashMap, HashSet};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::input_source::ParseError;
 
use crate::protocol::parser::ModuleCompilationPhase;
 
use crate::protocol::parser::type_table::*;
 
use crate::protocol::parser::token_parsing::*;
 
use super::visitor::{
 
    STMT_BUFFER_INIT_CAPACITY,
 
    EXPR_BUFFER_INIT_CAPACITY,
 
    Ctx,
 
    Visitor2,
 
    VisitorResult
 
};
 
use std::collections::hash_map::Entry;
 

	
 
const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 
const STRING_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::String ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // A marker with an identifier which we can use to retrieve the type subtree
 
    // that follows the marker. This is used to perform type inference on
 
    // polymorphs: an expression may determine the polymorphs type, after we
 
    // need to apply that information to all other places where the polymorph is
 
    // used.
 
    MarkerDefinition(usize), // marker for polymorph types on a procedure's definition
 
    MarkerBody(usize), // marker for polymorph types within a procedure body
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    UInt8,
 
    UInt16,
 
    UInt32,
 
    UInt64,
 
    SInt8,
 
    SInt16,
 
    SInt32,
 
    SInt64,
 
    Character,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, usize)
 
}
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        match self {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | 
 
            ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        // TODO: @float
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_msg_array_or_slice(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_msg_array_or_slice()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 |
 
            ITP::Character | ITP::String => {
 
                -1
 
            },
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Instance(_, num_args) => {
 
                (*num_args as i32) - 1
 
            }
 
        }
 
    }
 
}
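
// Illustration of the flattened type encoding used above: `Array<u8>` is the
// two parts `[Array, UInt8]`, and the depth changes let us find where a
// subtree ends. Also shows template-directed inference of "like" parts
// (illustration only).
#[cfg(test)]
mod inference_part_sketch {
    use super::{InferenceType, InferenceTypePart as ITP};

    #[test]
    fn flattened_tree_walking() {
        let array_of_u8 = [ITP::Array, ITP::UInt8];
        assert_eq!(InferenceType::find_subtree_end_idx(&array_of_u8, 0), 2);

        // `Array` expects one subtype (depth change 0), `UInt8` closes it (-1).
        assert_eq!(ITP::Array.depth_change(), 0);
        assert_eq!(ITP::UInt8.depth_change(), -1);

        // A partially known part may be replaced by a compatible concrete part.
        assert!(ITP::IntegerLike.may_be_inferred_from(&ITP::UInt32));
        assert!(!ITP::Bool.may_be_inferred_from(&ITP::UInt32));
    }
}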
 

	
 
impl From<ConcreteTypePart> for InferenceTypePart {
 
    fn from(v: ConcreteTypePart) -> InferenceTypePart {
 
        use ConcreteTypePart as CTP;
 
        use InferenceTypePart as ITP;
 

	
 
        match v {
 
            CTP::Marker(_) => {
 
                unreachable!("encountered marker while converting concrete type to inferred type");
 
            }
 
            CTP::Void => ITP::Void,
 
            CTP::Message => ITP::Message,
 
            CTP::Bool => ITP::Bool,
 
            CTP::UInt8 => ITP::UInt8,
 
            CTP::UInt16 => ITP::UInt16,
 
            CTP::UInt32 => ITP::UInt32,
 
            CTP::UInt64 => ITP::UInt64,
 
            CTP::SInt8 => ITP::SInt8,
 
            CTP::SInt16 => ITP::SInt16,
 
            CTP::SInt32 => ITP::SInt32,
 
            CTP::SInt64 => ITP::SInt64,
 
            CTP::Character => ITP::Character,
 
            CTP::String => ITP::String,
 
            CTP::Array => ITP::Array,
 
            CTP::Slice => ITP::Slice,
 
            CTP::Input => ITP::Input,
 
            CTP::Output => ITP::Output,
 
            CTP::Instance(id, num) => ITP::Instance(id, num),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug)]
 
struct InferenceType {
 
    has_body_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    /// Generates a new InferenceType. The two boolean flags will be checked in
 
    /// debug mode.
 
    fn new(has_body_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        if cfg!(debug_assertions) {
 
            debug_assert!(!parts.is_empty());
 
            let parts_body_marker = parts.iter().any(|v| {
 
                if let InferenceTypePart::MarkerBody(_) = v { true } else { false }
 
            });
 
            debug_assert_eq!(has_body_marker, parts_body_marker);
 
            let parts_done = parts.iter().all(|v| v.is_concrete());
 
            debug_assert_eq!(is_done, parts_done, "{:?}", parts);
 
        }
 
        Self{ has_body_marker, is_done, parts }
 
    }
 

	
 
    /// Replaces a type subtree with the provided subtree. The caller must make
 
    /// sure that the replacement is a well-formed type subtree.
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
        let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
        self.parts.splice(start_idx..end_idx, with.iter().cloned());
 
        self.recompute_is_done();
 
    }
 

	
 
    // TODO: @performance, might all be done inline in the type inference methods
 
    fn recompute_is_done(&mut self) {
 
        self.is_done = self.parts.iter().all(|v| v.is_concrete());
 
    }
 

	
 
    /// Seeks a body marker starting at the specified position. If a marker is
 
    /// found then its value and the index of the type subtree that follows it
 
    /// is returned.
 
    fn find_body_marker(&self, mut start_idx: usize) -> Option<(usize, usize)> {
 
        while start_idx < self.parts.len() {
 
            if let InferenceTypePart::MarkerBody(marker) = &self.parts[start_idx] {
 
                return Some((*marker, start_idx + 1))
 
            }
 

	
 
            start_idx += 1;
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Returns an iterator over all body markers and the partial type tree that
 
    /// follows those markers. If it is a problem that `InferenceType` is 
 
    /// borrowed by the iterator, then use `find_body_marker`.
 
    fn body_marker_iter(&self) -> InferenceTypeMarkerIter {
 
        InferenceTypeMarkerIter::new(&self.parts)
 
    }
 

	
 
    /// Given that the `parts` are a depth-first serialized tree of types, this
 
    /// function finds the subtree anchored at a specific node. The returned 
 
    /// index is exclusive.
 
    fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize {
 
        let mut depth = 1;
 
        let mut idx = start_idx;
 

	
 
        while idx < parts.len() {
 
            depth += parts[idx].depth_change();
 
            if depth == 0 {
 
                return idx + 1;
 
            }
 
            idx += 1;
 
        }
 

	
 
        // If here, then the inference type is malformed
 
        unreachable!();
 
    }
 

	
 
    /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` 
 
    /// using the subtree at `template.parts[template_idx]`. Will return 
 
    /// `Some(depth_change_due_to_traversal)` if type inference has been 
 
    /// applied. In this case the indices will also be modified to point to the 
 
    /// next part in both templates. If type inference has not (or: could not) 
 
    /// be applied then `None` will be returned. Note that this might mean that 
 
    /// the types are incompatible.
 
    ///
 
    /// As this is a helper function, some assumptions: the parts are not
 
    /// exactly equal, and neither of them contains a marker. Also: only the
 
    /// `to_infer` parts are checked for inference. It might be that this 
 
    /// function returns `None`, but that `template` is still compatible
 
    /// with `to_infer`, e.g. when `template` has an `Unknown` part.
 
    fn infer_part_for_single_type(
 
        to_infer: &mut InferenceType, to_infer_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize,
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_infer_part = &to_infer.parts[*to_infer_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // TODO: Maybe do this differently?
 
        let mut template_definition_marker = None;
 
        if *template_idx > 0 {
 
            if let ITP::MarkerDefinition(marker) = &template_parts[*template_idx - 1] {
 
                template_definition_marker = Some(*marker)
 
            }
 
        }
 

	
 
        // Check for programmer mistakes
 
        debug_assert_ne!(to_infer_part, template_part);
 
        debug_assert!(!to_infer_part.is_marker(), "marker encountered in 'infer part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        // Inference of a somewhat-specified type
 
        if to_infer_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_infer_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 

	
 
            if let Some(marker) = template_definition_marker {
 
                to_infer.parts.insert(*to_infer_idx, ITP::MarkerDefinition(marker));
 
                *to_infer_idx += 1;
 
            }
 

	
 
            to_infer.parts[*to_infer_idx] = template_part.clone();
 

	
 
            *to_infer_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        // Inference of a completely unknown type
 
        if *to_infer_part == ITP::Unknown {
 
            // template part is different, so cannot be unknown, hence copy the
 
            // entire subtree. Make sure to copy definition markers, but not to
 
            // transfer polymorph markers.
 
            let template_end_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 
            let template_start_idx = if let Some(marker) = template_definition_marker {
 
                to_infer.parts[*to_infer_idx] = ITP::MarkerDefinition(marker);
 
                *template_idx
 
            } else {
 
                to_infer.parts[*to_infer_idx] = template_parts[*template_idx].clone();
 
                *template_idx + 1
 
            };
 
            *to_infer_idx += 1;
 

	
 
            for template_idx in template_start_idx..template_end_idx {
 
                let template_part = &template_parts[template_idx];
 
                if let ITP::MarkerBody(_) = template_part {
 
                    // Do not copy this one
 
                } else {
 
                    to_infer.parts.insert(*to_infer_idx, template_part.clone());
 
                    *to_infer_idx += 1;
 
                }
 
            }
 
            *template_idx = template_end_idx;
 

	
 
            // Note: by definition the LHS was Unknown and the RHS traversed a 
 
            // full subtree.
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Call that checks if the `to_check` part is compatible with the `infer`
 
    /// part. This is essentially a copy of `infer_part_for_single_type`, but
 
    /// without actually copying the type parts.
 
    fn check_part_for_single_type(
 
        to_check_parts: &[InferenceTypePart], to_check_idx: &mut usize,
 
        template_parts: &[InferenceTypePart], template_idx: &mut usize
 
    ) -> Option<i32> {
 
        use InferenceTypePart as ITP;
 

	
 
        let to_check_part = &to_check_parts[*to_check_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Checking programmer errors
 
        debug_assert_ne!(to_check_part, template_part);
 
        debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        if to_check_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_check_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 
            *to_check_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        if *to_check_part == ITP::Unknown {
 
            *to_check_idx += 1;
 
            *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 

	
 
            // By definition LHS and RHS had depth change of -1
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Attempts to infer types between two `InferenceType` instances. This 
 
    /// function is unsafe as it accepts pointers to work around Rust's 
 
    /// borrowing rules. The caller must ensure that the pointers are distinct.
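    ///
    /// Within this pass the two pointers are typically obtained from different
    /// `expr_types` entries, or from an `ExtraData` signature type and an
    /// `expr_types` entry, so distinctness follows from the caller using
    /// different expressions.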
 
    unsafe fn infer_subtrees_for_both_types(
 
        type_a: *mut InferenceType, start_idx_a: usize,
 
        type_b: *mut InferenceType, start_idx_b: usize
 
    ) -> DualInferenceResult {
 
@@ -829,1109 +830,1124 @@ enum SingleInferenceResult {
 
    Modified,
 
    Incompatible
 
}
 

	
 
enum DefinitionType {
 
    Component(ComponentDefinitionId),
 
    Function(FunctionDefinitionId),
 
}
 

	
 
impl DefinitionType {
 
    fn definition_id(&self) -> DefinitionId {
 
        match self {
 
            DefinitionType::Component(v) => v.upcast(),
 
            DefinitionType::Function(v) => v.upcast(),
 
        }
 
    }
 
}
 

	
 
#[derive(PartialEq, Eq)]
 
pub(crate) struct ResolveQueueElement {
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) monomorph_types: Vec<ConcreteType>,
 
}
 

	
 
pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
 

	
 
/// This particular visitor recurses depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
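/// The pass is driven by `queue_module_definitions` (queues all
/// non-polymorphic procedures of a module), `handle_module_definition` (visits
/// a single queued definition) and `resolve_types` (keeps progressing queued
/// expressions until no further progress can be made, then writes the
/// resulting concrete types).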
 
pub(crate) struct PassTyping {
 
    // Current definition we're typechecking.
 
    definition_type: DefinitionType,
 
    poly_vars: Vec<ConcreteType>,
 

	
 
    // Buffers for iteration over substatements and subexpressions
 
    stmt_buffer: Vec<StatementId>,
 
    expr_buffer: Vec<ExpressionId>,
 

	
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    var_types: HashMap<VariableId, VarData>,      // types of variables
 
    expr_types: HashMap<ExpressionId, InferenceType>,   // types of expressions
 
    extra_data: HashMap<ExpressionId, ExtraData>,       // data for polymorph inference
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progress on an associated type
 
    expr_queued: HashSet<ExpressionId>,
 
}
 

	
 
// TODO: @rename used for calls and struct literals, maybe union literals?
 
struct ExtraData {
 
    /// Progression of polymorphic variables (if any)
 
    poly_vars: Vec<InferenceType>,
 
    /// Progression of types of call arguments or struct members
 
    embedded: Vec<InferenceType>,
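    /// Progression of the type produced by the expression itself (e.g. the
    /// return type of a call or the type of a struct literal)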
 
    returned: InferenceType,
 
}
 

	
 
struct VarData {
 
    /// Type of the variable
 
    var_type: InferenceType,
 
    /// VariableExpressions that use the variable
 
    used_at: Vec<ExpressionId>,
 
    /// For channel statements we link to the other variable such that when one
 
    /// channel's interior type is resolved, we can also resolve the other one.
 
    linked_var: Option<VariableId>,
 
}
 

	
 
impl VarData {
 
    fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
 
    }
 
    fn new_local(var_type: InferenceType) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: None }
 
    }
 
}
 

	
 
impl PassTyping {
 
    pub(crate) fn new() -> Self {
 
        PassTyping {
 
            definition_type: DefinitionType::Function(FunctionDefinitionId::new_invalid()),
 
            poly_vars: Vec::new(),
 
            stmt_buffer: Vec::with_capacity(STMT_BUFFER_INIT_CAPACITY),
 
            expr_buffer: Vec::with_capacity(EXPR_BUFFER_INIT_CAPACITY),
 
            var_types: HashMap::new(),
 
            expr_types: HashMap::new(),
 
            extra_data: HashMap::new(),
 
            expr_queued: HashSet::new(),
 
        }
 
    }
 

	
 
    // TODO: @cleanup Unsure about this, maybe a pattern will arise after
 
    //  a while.
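    // Note that only definitions without polymorphic variables are queued up
    // front; polymorphic ones are queued from `resolve_types` once a call site
    // provides concrete types for them.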
 
    pub(crate) fn queue_module_definitions(ctx: &Ctx, queue: &mut ResolveQueue) {
 
        debug_assert_eq!(ctx.module.phase, ModuleCompilationPhase::ValidatedAndLinked);
 
        let root_id = ctx.module.root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 
            match definition {
 
                Definition::Function(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Component(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => {},
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        // Visit the definition
 
        debug_assert_eq!(ctx.module.root_id, element.root_id);
 
        self.reset();
 
        self.poly_vars.clear();
 
        self.poly_vars.extend(element.monomorph_types.iter().cloned());
 
        self.visit_definition(ctx, element.definition_id)?;
 

	
 
        // Keep resolving types
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.definition_type = DefinitionType::Function(FunctionDefinitionId::new_invalid());
 
        self.poly_vars.clear();
 
        self.stmt_buffer.clear();
 
        self.expr_buffer.clear();
 
        self.var_types.clear();
 
        self.expr_types.clear();
 
        self.extra_data.clear();
 
        self.expr_queued.clear();
 
    }
 
}
 

	
 
impl Visitor2 for PassTyping {
 
    // Definitions
 

	
 
    fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Component(id);
 

	
 
        let comp_def = &ctx.heap[id];
 
        debug_assert_eq!(comp_def.poly_vars.len(), self.poly_vars.len(), "component polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting component '{}': {}", comp_def.identifier.value.as_str(), id.0.index);
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in comp_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected component arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Function(id);
 

	
 
        let func_def = &ctx.heap[id];
 
        debug_assert_eq!(func_def.poly_vars.len(), self.poly_vars.len(), "function polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting function '{}': {}", func_def.identifier.value.as_str(), id.0.index);
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in func_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Traverse the statements in the block
 
        let block = &ctx.heap[id];
 

	
 
        for stmt_id in block.statements.clone() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 

	
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type_elements(&local.parser_type.elements, true);
 
        self.var_types.insert(memory_stmt.variable, VarData::new_local(var_type));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type_elements(&from_local.parser_type.elements, true);
 
        self.var_types.insert(from_local.this, VarData::new_channel(from_var_type, channel_stmt.to));
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type_elements(&to_local.parser_type.elements, true);
 
        self.var_types.insert(to_local.this, VarData::new_channel(to_var_type, channel_stmt.from));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_id = if_stmt.true_body;
 
        let false_body_id = if_stmt.false_body;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_block_stmt(ctx, true_body_id)?;
 
        if let Some(false_body_id) = false_body_id {
 
            self.visit_block_stmt(ctx, false_body_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
        let while_stmt = &ctx.heap[id];
 

	
 
        let body_id = while_stmt.body;
 
        let test_expr_id = while_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_block_stmt(ctx, body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        let sync_stmt = &ctx.heap[id];
 
        let body_id = sync_stmt.body;
 

	
 
        self.visit_block_stmt(ctx, body_id)
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        let return_stmt = &ctx.heap[id];
 
        debug_assert_eq!(return_stmt.expressions.len(), 1);
 
        let expr_id = return_stmt.expressions[0];
 

	
 
        self.visit_expr(ctx, expr_id)
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        let new_stmt = &ctx.heap[id];
 
        let call_expr_id = new_stmt.expression;
 

	
 
        self.visit_call_expr(ctx, call_expr_id)
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_stmt = &ctx.heap[id];
 
        let subexpr_id = expr_stmt.expression;
 

	
 
        self.visit_expr(ctx, subexpr_id)
 
    }
 

	
 
    // Expressions
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let assign_expr = &ctx.heap[id];
 
        let left_expr_id = assign_expr.left;
 
        let right_expr_id = assign_expr.right;
 

	
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.visit_expr(ctx, right_expr_id)?;
 

	
 
        self.progress_assignment_expr(ctx, id)
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let conditional_expr = &ctx.heap[id];
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
        self.expr_types.insert(test_expr_id, InferenceType::new(false, true, vec![InferenceTypePart::Bool]));
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_expr(ctx, true_expr_id)?;
 
        self.visit_expr(ctx, false_expr_id)?;
 

	
 
        self.progress_conditional_expr(ctx, id)
 
    }
 

	
 
    fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let binary_expr = &ctx.heap[id];
 
        let lhs_expr_id = binary_expr.left;
 
        let rhs_expr_id = binary_expr.right;
 

	
 
        self.visit_expr(ctx, lhs_expr_id)?;
 
        self.visit_expr(ctx, rhs_expr_id)?;
 

	
 
        self.progress_binary_expr(ctx, id)
 
    }
 

	
 
    fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let unary_expr = &ctx.heap[id];
 
        let arg_expr_id = unary_expr.expression;
 

	
 
        self.visit_expr(ctx, arg_expr_id)?;
 

	
 
        self.progress_unary_expr(ctx, id)
 
    }
 

	
 
    fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let indexing_expr = &ctx.heap[id];
 
        let subject_expr_id = indexing_expr.subject;
 
        let index_expr_id = indexing_expr.index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, index_expr_id)?;
 

	
 
        self.progress_indexing_expr(ctx, id)
 
    }
 

	
 
    fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let slicing_expr = &ctx.heap[id];
 
        let subject_expr_id = slicing_expr.subject;
 
        let from_expr_id = slicing_expr.from_index;
 
        let to_expr_id = slicing_expr.to_index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, from_expr_id)?;
 
        self.visit_expr(ctx, to_expr_id)?;
 

	
 
        self.progress_slicing_expr(ctx, id)
 
    }
 

	
 
    fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitorResult {
 
        // TODO: @Monomorph, this is a temporary hack, see other comments
 
        let expr = &mut ctx.heap[id];
 
        if let Field::Symbolic(field) = &mut expr.field {
 
            field.definition = None;
 
            field.field_idx = 0;
 
        }
 

	
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let select_expr = &ctx.heap[id];
 
        let subject_expr_id = select_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_select_expr(ctx, id)
 
    }
 

	
 
    fn visit_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let literal_expr = &ctx.heap[id];
 
        match &literal_expr.value {
 
            Literal::Null | Literal::False | Literal::True |
 
            Literal::Integer(_) | Literal::Character(_) | Literal::String(_) => {
 
                // No subexpressions
 
            },
 
            Literal::Struct(literal) => {
 
                // TODO: @performance
 
                let expr_ids: Vec<_> = literal.fields
 
                    .iter()
 
                    .map(|f| f.value)
 
                    .collect();
 

	
 
                self.insert_initial_struct_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            },
 
            Literal::Enum(_) => {
 
                // Enumerations do not carry any subexpressions, but may still
 
                // have a user-defined polymorphic marker variable. For this 
 
                // reason we may still have to apply inference to this 
 
                // polymorphic variable
 
                self.insert_initial_enum_polymorph_data(ctx, id);
 
            },
 
            Literal::Union(literal) => {
 
                // May carry subexpressions and polymorphic arguments
 
                // TODO: @performance
 
                let expr_ids = literal.values.clone();
 
                self.insert_initial_union_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            },
 
            Literal::Array(expressions) => {
 
                // TODO: @performance
 
                let expr_ids = expressions.clone();
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            }
 
        }
 

	
 
        self.progress_literal_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // TODO: @performance
 
        let call_expr = &ctx.heap[id];
 
        for arg_expr_id in call_expr.arguments.clone() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let var_data = self.var_types.get_mut(var_expr.declaration.as_ref().unwrap()).unwrap();
 
        var_data.used_at.push(upcast_id);
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
macro_rules! debug_assert_expr_ids_unique_and_known {
 
    // Base case for a single expression ID
 
    ($resolver:ident, $id:ident) => {
 
        if cfg!(debug_assertions) {
 
            debug_assert!($resolver.expr_types.contains_key(&$id));
 
        }
 
    };
 
    // Base case for two expression IDs
 
    ($resolver:ident, $id1:ident, $id2:ident) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2);
 
    };
 
    // Generic case
 
    ($resolver:ident, $id1:ident, $id2:ident, $($tail:ident),+) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2, $($tail),+);
 
    };
 
}
 

	
 
macro_rules! debug_assert_ptrs_distinct {
 
    // Base case
 
    ($ptr1:ident, $ptr2:ident) => {
 
        debug_assert!(!std::ptr::eq($ptr1, $ptr2));
 
    };
 
    // Generic case
 
    ($ptr1:ident, $ptr2:ident, $($tail:ident),+) => {
 
        debug_assert_ptrs_distinct!($ptr1, $ptr2);
 
        debug_assert_ptrs_distinct!($ptr2, $($tail),+);
 
    };
 
}
 

	
 
impl PassTyping {
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> {
 
        // Keep inferring until we can no longer make any progress
 
        while let Some(next_expr_id) = self.expr_queued.iter().next() {
 
            let next_expr_id = *next_expr_id;
 
            self.expr_queued.remove(&next_expr_id);
 
            self.progress_expr(ctx, next_expr_id)?;
 
        }
 

	
 
        // We check if we have all the types we need. If we're typechecking a 
 
        // polymorphic procedure more than once, then we have already annotated
 
        // the AST and have now performed typechecking for a different 
 
        // monomorph. In that case we just need to perform typechecking, no need
 
        // to annotate the AST again.
 
        // TODO: @Monomorph, this is completely wrong. It seemed okay, but it
 
        //  isn't. Each monomorph might result in completely different internal
 
        //  types.
 
        let definition_id = match &self.definition_type {
 
            DefinitionType::Component(id) => id.upcast(),
 
            DefinitionType::Function(id) => id.upcast(),
 
        };
 

	
 
        let already_checked = ctx.types.get_base_definition(&definition_id).unwrap().has_any_monomorph();
 
        for (expr_id, expr_type) in self.expr_types.iter_mut() {
 
            if !expr_type.is_done {
 
                // Auto-infer numberlike/integerlike types to a regular int
 
                if expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    expr_type.parts[0] = InferenceTypePart::SInt32;
 
                } else {
 
                    let expr = &ctx.heap[*expr_id];
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module.source, expr.span(), format!(
 
                            "could not fully infer the type of this expression (got '{}')",
 
                            expr_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 
            }
 

	
 
            if !already_checked {
 
                let concrete_type = ctx.heap[*expr_id].get_type_mut();
 
                expr_type.write_concrete_type(concrete_type);
 
            } else {
 
                if cfg!(debug_assertions) {
 
                    let mut concrete_type = ConcreteType::default();
 
                    expr_type.write_concrete_type(&mut concrete_type);
 
                    debug_assert_eq!(*ctx.heap[*expr_id].get_type(), concrete_type);
 
                }
 
            }
 
        }
 

	
 
        // All types are fine
 
        ctx.types.add_monomorph(&definition_id, self.poly_vars.clone());
 

	
 
        // Check all things we need to monomorphize
 
        // TODO: Struct/enum/union monomorphization
 
        for (expr_id, extra_data) in self.extra_data.iter() {
 
            if extra_data.poly_vars.is_empty() { continue; }
 

	
 
            // Retrieve polymorph variable specification. Those of struct 
 
            // literals and those of procedure calls need to be fully inferred.
 
            // The remaining ones (e.g. select expressions) allow partial 
 
            // inference of types, as long as the accessed field's type is
 
            // fully inferred.
 
            let needs_full_inference = match &ctx.heap[*expr_id] {
 
                Expression::Call(_) => true,
 
                Expression::Literal(_) => true,
 
                _ => false
 
            };
 

	
 
            if needs_full_inference {
 
                let mut monomorph_types = Vec::with_capacity(extra_data.poly_vars.len());
 
                for (poly_idx, poly_type) in extra_data.poly_vars.iter().enumerate() {
 
                    if !poly_type.is_done {
 
                        // TODO: Single clean function for function signatures and polyvars.
 
                        // TODO: Better error message
 
                        let expr = &ctx.heap[*expr_id];
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module.source, expr.span(), format!(
 
                                "could not fully infer the type of polymorphic variable {} of this expression (got '{}')",
 
                                poly_idx, poly_type.display_name(&ctx.heap)
 
                            )
 
                        ))
 
                    }
 

	
 
                    let mut concrete_type = ConcreteType::default();
 
                    poly_type.write_concrete_type(&mut concrete_type);
 
                    monomorph_types.insert(poly_idx, concrete_type);
 
                }
 

	
 
                // Resolve to the appropriate expression and instantiate 
 
                // monomorphs.
 
                match &ctx.heap[*expr_id] {
 
                    Expression::Call(call_expr) => {
 
                        // Add to type table if not yet typechecked
 
                        if call_expr.method == Method::UserFunction {
 
                            let definition_id = call_expr.definition;
 
                            if !ctx.types.has_monomorph(&definition_id, &monomorph_types) {
 
                                let root_id = ctx.types
 
                                    .get_base_definition(&definition_id)
 
                                    .unwrap()
 
                                    .ast_root;
 

	
 
                                // Pre-emptively add the monomorph to the type table, but
 
                                // we still need to perform typechecking on it
 
                                // TODO: Unsure about this, performance wise
 
                                let queue_element = ResolveQueueElement{
 
                                    root_id,
 
                                    definition_id,
 
                                    monomorph_types,
 
                                };
 
                                if !queue.contains(&queue_element) {
 
                                    queue.push(queue_element);
 
                                }
 
                            }
 
                        }
 
                    },
 
                    Expression::Literal(lit_expr) => {
 
                        let definition_id = match &lit_expr.value {
 
                            Literal::Struct(literal) => &literal.definition,
 
                            Literal::Enum(literal) => &literal.definition,
 
                            Literal::Union(literal) => &literal.definition,
 
                            _ => unreachable!("post-inference monomorph for non-struct, non-enum literal")
 
                        };
 
                        if !ctx.types.has_monomorph(definition_id, &monomorph_types) {
 
                            ctx.types.add_monomorph(definition_id, monomorph_types);
 
                        }
 
                    },
 
                    _ => unreachable!("needs fully inference, but not a struct literal or call expression")
 
                }
 
            } // else: was just a helper structure...
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, id: ExpressionId) -> Result<(), ParseError> {
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Binding(_expr) => {
 
                unimplemented!("progress binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Literal(expr) => {
 
                let id = expr.this;
 
                self.progress_literal_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError> {
 
        use AssignmentOperator as AO;
 

	
 
        // TODO: Assignable check
 
        let upcast_id = id.upcast();
 

	
 
        // Assignment does not return anything (it operates like a statement)
 
        let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &VOID_TEMPLATE)?;
 

	
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Apply forced constraint to LHS value
        let progress_forced = match expr.operation {
            AO::Set =>
                false,
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
                self.apply_forced_constraint(ctx, arg1_expr_id, &NUMBERLIKE_TEMPLATE)?,
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
                self.apply_forced_constraint(ctx, arg1_expr_id, &INTEGERLIKE_TEMPLATE)?,
        };
 

	
 
        let (progress_arg1, progress_arg2) = self.apply_equal2_constraint(
            ctx, upcast_id, arg1_expr_id, 0, arg2_expr_id, 0
        )?;
        debug_assert!(if progress_forced { progress_arg2 } else { true });
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg1 type [{}]: {}", progress_forced || progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_base || progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
        if progress_forced || progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError> {
 
        // Note: test expression type is already enforced
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.true_expression;
 
        let arg2_expr_id = expr.false_expression;
 

	
 
        debug_log!("Conditional expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> Result<(), ParseError> {
 
        // Note: our expression type might be fixed by our parent, but we still
 
        // need to make sure it matches the type associated with our operation.
 
        use BinaryOperator as BO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_id = expr.left;
 
        let arg2_id = expr.right;
 

	
 
        debug_log!("Binary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = match expr.operation {
 
            BO::Concatenate => {
 
                // Arguments may be arrays/slices, output is always an array
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &ARRAYLIKE_TEMPLATE)?;
 

	
 
                // If they're all arraylike, then we want the subtype to match
 
                let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 1)?;
 

	
 
                (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
            },
 
            BO::LogicalOr | BO::LogicalAnd => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => {
 
                // All equal of integer type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
            BO::Equality | BO::Inequality => {
 
                // Equal2 on args, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::LessThan | BO::GreaterThan | BO::LessThanEqual | BO::GreaterThanEqual => {
 
                // Equal2 on args with numberlike type, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg_base = self.apply_forced_constraint(ctx, arg1_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg_base || progress_arg1, progress_arg_base || progress_arg2)
 
            },
 
            BO::Add | BO::Subtract | BO::Multiply | BO::Divide => {
 
                // All equal of number type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_id); }
 
        if progress_arg2 { self.queue_expr(arg2_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> Result<(), ParseError> {
 
        use UnaryOperator as UO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg_id = expr.expression;
 

	
 
        debug_log!("Unary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg  type: {}", self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg) = match expr.operation {
 
            UO::Positive | UO::Negative => {
 
                // Equal types of numeric class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::BitwiseNot | UO::PreIncrement | UO::PreDecrement | UO::PostIncrement | UO::PostDecrement => {
 
                // Equal types of integer class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::LogicalNot => {
 
                // Both booleans
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg = self.apply_forced_constraint(ctx, arg_id, &BOOL_TEMPLATE)?;
 
                (progress_expr, progress_arg)
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg  type [{}]: {}", progress_arg, self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg { self.queue_expr(arg_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let index_id = expr.index;
 

	
 
        debug_log!("Indexing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type: {}", self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure subject is arraylike and index is integerlike
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_index = self.apply_forced_constraint(ctx, index_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type [{}]: {}", progress_index, self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_index { self.queue_expr(index_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let from_id = expr.from_index;
 
        let to_id = expr.to_index;
 

	
 
        debug_log!("Slicing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type: {}", self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type: {}", self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure subject is arraylike and indices are of equal integerlike
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_idx_base = self.apply_forced_constraint(ctx, from_id, &INTEGERLIKE_TEMPLATE)?;
 
        let (progress_from, progress_to) = self.apply_equal2_constraint(ctx, upcast_id, from_id, 0, to_id, 0)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type [{}]: {}", progress_idx_base || progress_from, self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type [{}]: {}", progress_idx_base || progress_to, self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_idx_base || progress_from { self.queue_expr(from_id); }
 
        if progress_idx_base || progress_to { self.queue_expr(to_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        
 
        debug_log!("Select expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&ctx.heap[id].subject).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let expr = &mut ctx.heap[id];
 
        let subject_id = expr.subject;
 

	
 
        fn determine_inference_type_instance<'a>(types: &'a TypeTable, infer_type: &InferenceType) -> Result<Option<&'a DefinedType>, ()> {
 
            for part in &infer_type.parts {
 
                if part.is_marker() || !part.is_concrete() {
 
                    continue;
 
                }
 

	
 
                // Part is concrete, check if it is an instance of something
 
                if let InferenceTypePart::Instance(definition_id, _num_sub) = part {
 
                    // Lookup type definition and ensure the specified field 
 
                    // name exists on the struct
 
                    let definition = types.get_base_definition(definition_id);
 
                    debug_assert!(definition.is_some());
 
                    let definition = definition.unwrap();
 

	
 
                    return Ok(Some(definition))
 
                } else {
 
                    // Expected an instance of something
 
                    return Err(())
 
                }
 
            }
 

	
 
            // Nothing is concrete yet
 
            Ok(None)
 
        }
 

	
 
        let (progress_subject, progress_expr) = match &mut expr.field {
 
            Field::Length => {
 
                let progress_subject = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
                (progress_subject, progress_expr)
 
            },
 
            Field::Symbolic(field) => {
 
                // Retrieve the struct definition id and field index if possible 
 
                // and not previously determined
 
                if field.definition.is_none() {
 
                    // Not yet known, check if we can determine it
 
                    let subject_type = self.expr_types.get(&subject_id).unwrap();
 
                    let type_def = determine_inference_type_instance(&ctx.types, subject_type);
 

	
 
                    match type_def {
 
                        Ok(Some(type_def)) => {
 
                            // Subject type is known, check if it is a 
 
                            // struct and the field exists on the struct
 
                            let struct_def = if let DefinedTypeVariant::Struct(struct_def) = &type_def.definition {
 
                                struct_def
 
                            } else {
 
                                return Err(ParseError::new_error_at_span(
 
                                    &ctx.module.source, field.identifier.span, format!(
 
                                        "Can only apply field access to structs, got a subject of type '{}'",
 
                                        subject_type.display_name(&ctx.heap)
 
                                    )
 
                                ));
 
                            };
 

	
 
                            for (field_def_idx, field_def) in struct_def.fields.iter().enumerate() {
 
                                if field_def.identifier == field.identifier {
 
                                    // Set field definition and index
 
                                    field.definition = Some(type_def.ast_definition);
 
                                    field.field_idx = field_def_idx;
 
                                    break;
 
                                }
 
                            }
 

	
 
                            if field.definition.is_none() {
 
                                let field_span = field.identifier.span;
 
                                let ast_struct_def = ctx.heap[type_def.ast_definition].as_struct();
 
                                return Err(ParseError::new_error_at_span(
 
                                    &ctx.module.source, field_span, format!(
 
                                        "this field does not exist on the struct '{}'",
 
                                        ast_struct_def.identifier.value.as_str()
 
                                    )
 
                                ))
 
                            }
 

	
 
                            // Encountered definition and field index for the
 
                            // first time
 
                            self.insert_initial_select_polymorph_data(ctx, id);
 
                        },
 
                        Ok(None) => {
 
                            // Type of subject is not yet known, so we 
 
                            // cannot make any progress yet
 
                            return Ok(())
 
                        },
 
                        Err(()) => {
 
                            return Err(ParseError::new_error_at_span(
 
                                &ctx.module.source, field.identifier.span, format!(
 
                                    "Can only apply field access to structs, got a subject of type '{}'",
 
                                    subject_type.display_name(&ctx.heap)
 
                                )
 
                            ));
 
                        }
 
                    }
 
                }
 

	
 
                // If here then field definition and index are known, and the
 
                // initial type (based on the struct's definition) has been
 
                // applied.
 
                // Check to see if we can infer anything about the subject's and
 
                // the field's polymorphic variables
 
                let poly_data = self.extra_data.get_mut(&upcast_id).unwrap();
 
                let mut poly_progress = HashSet::new();
 
                
 
                // Apply to struct's type
 
                let signature_type: *mut _ = &mut poly_data.embedded[0];
 
                let subject_type: *mut _ = self.expr_types.get_mut(&subject_id).unwrap();
 

	
 
                let (_, progress_subject) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, Some(subject_id), poly_data, &mut poly_progress,
 
                    signature_type, 0, subject_type, 0
 
                )?;
 

	
 
                if progress_subject {
 
                    self.expr_queued.insert(subject_id);
 
                }
 
                
 
                // Apply to field's type
 
                let signature_type: *mut _ = &mut poly_data.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, poly_data, &mut poly_progress, 
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                if progress_expr {
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 

	
 
                // Reapply progress in polymorphic variables to struct's type
 
                let signature_type: *mut _ = &mut poly_data.embedded[0];
 
                let subject_type: *mut _ = self.expr_types.get_mut(&subject_id).unwrap();
 
                
 
                let progress_subject = Self::apply_equal2_polyvar_constraint(
 
                    poly_data, &poly_progress, signature_type, subject_type
 
                );
 
@@ -1977,1030 +1993,1029 @@ impl PassTyping {
 
            },
 
            Literal::Character(_) => {
 
                self.apply_forced_constraint(ctx, upcast_id, &CHARACTER_TEMPLATE)?;
 
                todo!("check character literal type inference");
 
            },
 
            Literal::String(_) => {
 
                self.apply_forced_constraint(ctx, upcast_id, &STRING_TEMPLATE)?;
 
                todo!("check string literal type inference");
 
            },
 
            Literal::Struct(data) => {
 
                let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                debug_assert_eq!(extra.embedded.len(), data.fields.len());
 

	
 
                debug_log!(" * During (inferring types from fields and struct type):");
 

	
 
                // Mutually infer field signature/expression types
 
                for (field_idx, field) in data.fields.iter().enumerate() {
 
                    let field_expr_id = field.value;
 
                    let signature_type: *mut _ = &mut extra.embedded[field_idx];
 
                    let field_type: *mut _ = self.expr_types.get_mut(&field_expr_id).unwrap();
 
                    let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                        ctx, upcast_id, Some(field_expr_id), extra, &mut poly_progress,
 
                        signature_type, 0, field_type, 0
 
                    )?;
 

	
 
                    debug_log!(
 
                        "   - Field {} type | sig: {}, field: {}", field_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*field_type}.display_name(&ctx.heap)
 
                    );
 

	
 
                    if progress_arg {
 
                        self.expr_queued.insert(field_expr_id);
 
                    }
 
                }
 

	
 
                debug_log!("   - Field poly progress | {:?}", poly_progress);
 

	
 
                // Same for the type of the struct itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 
                debug_log!("   - Ret poly progress | {:?}", poly_progress);
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup, cannot call utility self.queue_parent thingo
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 

	
 
                // Check which expressions use the polymorphic arguments. If the
 
                // polymorphic variables have been progressed then we try to 
 
                // progress them inside the expression as well.
 
                debug_log!(" * During (reinferring from progressed polyvars):");
 

	
 
                // For all field expressions
 
                for field_idx in 0..extra.embedded.len() {
 
                    debug_assert_eq!(field_idx, data.fields[field_idx].field_idx, "confusing, innit?");
 
                    let signature_type: *mut _ = &mut extra.embedded[field_idx];
 
                    let field_expr_id = data.fields[field_idx].value;
 
                    let field_type: *mut _ = self.expr_types.get_mut(&field_expr_id).unwrap();
 

	
 
                    let progress_arg = Self::apply_equal2_polyvar_constraint(
 
                        extra, &poly_progress, signature_type, field_type
 
                    );
 

	
 
                    debug_log!(
 
                        "   - Field {} type | sig: {}, field: {}", field_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*field_type}.display_name(&ctx.heap)
 
                    );
 
                    if progress_arg {
 
                        self.expr_queued.insert(field_expr_id);
 
                    }
 
                }
 
                
 
                // For the return type
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Enum(_) => {
 
                let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                
 
                debug_log!(" * During (inferring types from return type)");
 

	
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 

	
 
                debug_log!(" * During (reinferring from progress polyvars):");
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Union(data) => {
 
                let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 
                for _poly in &extra.poly_vars {
 
                    debug_log!(" * Poly: {}", _poly.display_name(&ctx.heap));
 
                }
 
                let mut poly_progress = HashSet::new();
 
                debug_assert_eq!(extra.embedded.len(), data.values.len());
 

	
 
                debug_log!(" * During (inferring types from variant values and union type):");
 

	
 
                // Mutually infer union variant values
 
                for (value_idx, value_expr_id) in data.values.iter().enumerate() {
 
                    let value_expr_id = *value_expr_id;
 
                    let signature_type: *mut _ = &mut extra.embedded[value_idx];
 
                    let value_type: *mut _ = self.expr_types.get_mut(&value_expr_id).unwrap();
 
                    let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                        ctx, upcast_id, Some(value_expr_id), extra, &mut poly_progress,
 
                        signature_type, 0, value_type, 0 
 
                    )?;
 

	
 
                    debug_log!(
 
                        "   - Value {} type | sig: {}, field: {}", value_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*value_type}.display_name(&ctx.heap)
 
                    );
 

	
 
                    if progress_arg {
 
                        self.expr_queued.insert(value_expr_id);
 
                    }
 
                }
 

	
 
                debug_log!("   - Field poly progress | {:?}", poly_progress);
 

	
 
                // Infer type of union itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, extra, &mut poly_progress,
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                debug_log!(
 
                    "   - Ret type | sig: {}, expr: {}",
 
                    unsafe{&*signature_type}.display_name(&ctx.heap),
 
                    unsafe{&*expr_type}.display_name(&ctx.heap)
 
                );
 
                debug_log!("   - Ret poly progress | {:?}", poly_progress);
 

	
 
                if progress_expr {
 
                    // TODO: @cleanup, borrowing rules
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 

	
 
                debug_log!(" * During (reinferring from progress polyvars):");
 
            
 
                // For all embedded values of the union variant
 
                for value_idx in 0..extra.embedded.len() {
 
                    let signature_type: *mut _ = &mut extra.embedded[value_idx];
 
                    let value_expr_id = data.values[value_idx];
 
                    let value_type: *mut _ = self.expr_types.get_mut(&value_expr_id).unwrap();
 
                    
 
                    let progress_arg = Self::apply_equal2_polyvar_constraint(
 
                        extra, &poly_progress, signature_type, value_type
 
                    );
 

	
 
                    debug_log!(
 
                        "   - Value {} type | sig: {}, value: {}", value_idx,
 
                        unsafe{&*signature_type}.display_name(&ctx.heap),
 
                        unsafe{&*value_type}.display_name(&ctx.heap)
 
                    );
 
                    if progress_arg {
 
                        self.expr_queued.insert(value_expr_id);
 
                    }
 
                }
 

	
 
                // And for the union type itself
 
                let signature_type: *mut _ = &mut extra.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    extra, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                progress_expr
 
            },
 
            Literal::Array(data) => {
 
                let expr_elements = data.clone(); // TODO: @performance
 
                debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
 
                debug_log!(" * Before:");
 
                debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
                // All elements should have an equal type
 
                let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
 
                for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
 
                    if *progress_arg {
 
                        self.queue_expr(*arg_id);
 
                    }
 
                }
 

	
 
                // And the output should be an array of the element types
 
                let mut progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                if !expr_elements.is_empty() {
 
                    let first_arg_id = expr_elements[0];
 
                    let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
 
                        ctx, upcast_id, upcast_id, 1, first_arg_id, 0
 
                    )?;
 

	
 
                    progress_expr = progress_expr || inner_expr_progress;
 

	
 
                    // Note that if the array type progressed the type of the arguments,
 
                    // then we should enqueue this progression function again
 
                    // TODO: @fix Make apply_equal_n accept a start idx as well
 
                    if arg_progress { self.queue_expr(upcast_id); }
 
                }
 

	
 
                debug_log!(" * After:");
 
                debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
                progress_expr
 
            },
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // TODO: FIX!!!!
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    // TODO: @cleanup, see how this can be cleaned up once I implement
 
    //  polymorphic struct/enum/union literals. These likely follow the same
 
    //  pattern as here.
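    /// Progresses type inference for a call expression: the argument types and
    /// the return type are mutually inferred against the callee's signature
    /// stored in `ExtraData`, any progressed polymorphic variables are then
    /// reapplied to those same signature/expression types, and every
    /// expression whose type changed is queued for another pass.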
 
    fn progress_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let extra = self.extra_data.get_mut(&upcast_id).unwrap();
 

	
 
        debug_log!("Call expr '{}': {}", ctx.heap[expr.definition].identifier().value.as_str(), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 
        debug_log!(" * During (inferring types from arguments and return type):");
 

	
 
        // Check if we can make progress using the arguments and/or return types
 
        // while keeping track of the polyvars we've extended
 
        let mut poly_progress = HashSet::new();
 
        debug_assert_eq!(extra.embedded.len(), expr.arguments.len());
 

	
 
        for (arg_idx, arg_id) in expr.arguments.clone().into_iter().enumerate() {
 
            let signature_type: *mut _ = &mut extra.embedded[arg_idx];
 
            let argument_type: *mut _ = self.expr_types.get_mut(&arg_id).unwrap();
 
            let (_, progress_arg) = Self::apply_equal2_signature_constraint(
 
                ctx, upcast_id, Some(arg_id), extra, &mut poly_progress,
 
                signature_type, 0, argument_type, 0
 
            )?;
 

	
 
            debug_log!(
 
                "   - Arg {} type | sig: {}, arg: {}", arg_idx,
 
                unsafe{&*signature_type}.display_name(&ctx.heap), 
 
                unsafe{&*argument_type}.display_name(&ctx.heap));
 

	
 
            if progress_arg {
 
                // Progressed argument expression
 
                self.expr_queued.insert(arg_id);
 
            }
 
        }
 

	
 
        // Do the same for the return type
 
        let signature_type: *mut _ = &mut extra.returned;
 
        let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 
        let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
            ctx, upcast_id, None, extra, &mut poly_progress,
 
            signature_type, 0, expr_type, 0
 
        )?;
 

	
 
        debug_log!(
 
            "   - Ret type | sig: {}, expr: {}", 
 
            unsafe{&*signature_type}.display_name(&ctx.heap), 
 
            unsafe{&*expr_type}.display_name(&ctx.heap)
 
        );
 

	
 
        if progress_expr {
 
            // TODO: @cleanup, cannot call utility self.queue_parent thingo
 
            if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                self.expr_queued.insert(parent_id);
 
            }
 
        }
 

	
 
        // If we did not have an error in the polymorph inference above, then
 
        // reapplying the polymorph type to each argument type and the return
 
        // type should always succeed.
 
        debug_log!(" * During (reinferring from progressed polyvars):");
 
        for (_poly_idx, _poly_var) in extra.poly_vars.iter().enumerate() {
 
            debug_log!("   - Poly {} | sig: {}", _poly_idx, _poly_var.display_name(&ctx.heap));
 
        }
 
        // TODO: @performance If the algorithm is changed to be more "on demand
 
        //  argument re-evaluation", instead of "all-argument re-evaluation",
 
        //  then this is no longer true
 
        for arg_idx in 0..extra.embedded.len() {
 
            let signature_type: *mut _ = &mut extra.embedded[arg_idx];
 
            let arg_expr_id = expr.arguments[arg_idx];
 
            let arg_type: *mut _ = self.expr_types.get_mut(&arg_expr_id).unwrap();
 
            
 
            let progress_arg = Self::apply_equal2_polyvar_constraint(
 
                extra, &poly_progress,
 
                signature_type, arg_type
 
            );
 
            
 
            debug_log!(
 
                "   - Arg {} type | sig: {}, arg: {}", arg_idx, 
 
                unsafe{&*signature_type}.display_name(&ctx.heap), 
 
                unsafe{&*arg_type}.display_name(&ctx.heap)
 
            );
 
            if progress_arg {
 
                self.expr_queued.insert(arg_expr_id);
 
            }
 
        }
 

	
 
        // Once more for the return type
 
        let signature_type: *mut _ = &mut extra.returned;
 
        let ret_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
        let progress_ret = Self::apply_equal2_polyvar_constraint(
 
            extra, &poly_progress, signature_type, ret_type
 
        );
 
        debug_log!(
 
            "   - Ret type | sig: {}, arg: {}", 
 
            unsafe{&*signature_type}.display_name(&ctx.heap), 
 
            unsafe{&*ret_type}.display_name(&ctx.heap)
 
        );
 
        if progress_ret {
 
            self.queue_expr_parent(ctx, upcast_id);
 
        }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        Ok(())
 
    }
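    /// Progresses type inference for a variable expression by mutually
    /// inferring the variable's shared type and the expression's type. When
    /// the variable type progresses, all other expressions using that variable
    /// are queued again and the update is propagated one-way to a linked port
    /// variable, if present.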
 

	
 
    fn progress_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let var_expr = &ctx.heap[id];
 
        let var_id = var_expr.declaration.unwrap();
 

	
 
        debug_log!("Variable expr '{}': {}", ctx.heap[var_id].identifier().value.as_str(), upcast_id.index);
 
        debug_log!("Variable expr '{}': {}", ctx.heap[var_id].identifier.value.as_str(), upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Var  type: {}", self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Retrieve shared variable type and expression type and apply inference
 
        let var_data = self.var_types.get_mut(&var_id).unwrap();
 
        let expr_type = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            &mut var_data.var_type as *mut _, 0, expr_type, 0
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            let var_decl = &ctx.heap[var_id];
 
            return Err(ParseError::new_error_at_span(
 
                &ctx.module.source, var_decl.identifier.span, format!(
 
                    "Conflicting types for this variable, previously assigned the type '{}'",
 
                    var_data.var_type.display_name(&ctx.heap)
 
                )
 
            ).with_info_at_span(
 
                &ctx.module.source, var_expr.identifier.span, format!(
 
                    "But inferred to have incompatible type '{}' here",
 
                    expr_type.display_name(&ctx.heap)
 
                )
 
            ))
 
        }
 

	
 
        let progress_var = infer_res.modified_lhs();
 
        let progress_expr = infer_res.modified_rhs();
 

	
 
        if progress_var {
 
            // Let other variable expressions using this type progress as well
 
            for other_expr in var_data.used_at.iter() {
 
                if *other_expr != upcast_id {
 
                    self.expr_queued.insert(*other_expr);
 
                }
 
            }
 

	
 
            // Let a linked port know that our type has updated
 
            if let Some(linked_id) = var_data.linked_var {
 
                // Only perform one-way inference to prevent updating our type, this
 
                // would lead to an inconsistency
 
                let var_type: *mut _ = &mut var_data.var_type;
 
                let link_data = self.var_types.get_mut(&linked_id).unwrap();
 

	
 
                debug_assert!(
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Input ||
 
                    unsafe{&*var_type}.parts[0] == InferenceTypePart::Output
 
                );
 
                debug_assert!(
 
                    link_data.var_type.parts[0] == InferenceTypePart::Input ||
 
                    link_data.var_type.parts[0] == InferenceTypePart::Output
 
                );
 
                match InferenceType::infer_subtree_for_single_type(&mut link_data.var_type, 1, &unsafe{&*var_type}.parts, 1) {
 
                    SingleInferenceResult::Modified => {
 
                        for other_expr in &link_data.used_at {
 
                            self.expr_queued.insert(*other_expr);
 
                        }
 
                    },
 
                    SingleInferenceResult::Unmodified => {},
 
                    SingleInferenceResult::Incompatible => {
 
                        let var_data = self.var_types.get(&var_id).unwrap();
 
                        let link_data = self.var_types.get(&linked_id).unwrap();
 
                        let var_decl = &ctx.heap[var_id];
 
                        let link_decl = &ctx.heap[linked_id];
 

	
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module.source, var_decl.identifier.span, format!(
 
                                "Conflicting types for this variable, assigned the type '{}'",
 
                                var_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ).with_info_at_span(
 
                            &ctx.module.source, link_decl.identifier.span, format!(
 
                                "Because it is incompatible with this variable, assigned the type '{}'",
 
                                link_data.var_type.display_name(&ctx.heap)
 
                            )
 
                        ));
 
                    }
 
                }
 
            }
 
        }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Var  type [{}]: {}", progress_var, self.var_types.get(&var_id).unwrap().var_type.display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        Ok(())
 
    }
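    /// Queues the parent of the supplied expression for another inference
    /// pass, but only if that parent is itself an expression.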
 

	
 
    fn queue_expr_parent(&mut self, ctx: &Ctx, expr_id: ExpressionId) {
 
        if let ExpressionParent::Expression(parent_expr_id, _) = &ctx.heap[expr_id].parent() {
 
            self.expr_queued.insert(*parent_expr_id);
 
        }
 
    }
 

	
 
    fn queue_expr(&mut self, expr_id: ExpressionId) {
 
        self.expr_queued.insert(expr_id);
 
    }
 

	
 
    /// Applies a forced type constraint: the type associated with the supplied
 
    /// expression will be molded into the provided "template". The template may
 
    /// be fully specified (e.g. a bool) or contain "inference" variables (e.g.
 
    /// an array of T)
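    /// For example, forcing a boolean template onto a still-unknown expression
    /// type resolves that expression's type to `bool`.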
 
    fn apply_forced_constraint(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> Result<bool, ParseError> {
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id);
 
        let expr_type = self.expr_types.get_mut(&expr_id).unwrap();
 
        match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(
 
                self.construct_template_type_error(ctx, expr_id, template)
 
            )
 
        }
 
    }
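    /// Pointer-based variant of `apply_forced_constraint`: infers the type
    /// behind `to_infer` against the template, reporting incompatibility as a
    /// bare `Err(())` so the caller can construct an appropriate error.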
 

	
 
    fn apply_forced_constraint_types(
 
        to_infer: *mut InferenceType, to_infer_start_idx: usize,
 
        template: &[InferenceTypePart], template_start_idx: usize
 
    ) -> Result<bool, ()> {
 
        match InferenceType::infer_subtree_for_single_type(
 
            unsafe{ &mut *to_infer }, to_infer_start_idx,
 
            template, template_start_idx
 
        ) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(()),
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects the two provided types to be
 
    /// equal. We attempt to make progress in inferring the types. If the call
 
    /// is successful then the composition of all types are made equal.
 
    /// The "parent" `expr_id` is provided to construct errors.
 
    fn apply_equal2_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg1_start_idx: usize,
 
        arg2_id: ExpressionId, arg2_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError> {
 
        debug_assert_expr_ids_unique_and_known!(self, arg1_id, arg2_id);
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            arg1_type, arg1_start_idx,
 
            arg2_type, arg2_start_idx
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
 

	
 
    /// Applies an equal2 constraint between a signature type (e.g. a function
 
    /// argument or struct field) and an expression whose type should match that
 
    /// signature type. If we make progress on the signature, then we try to see if
 
    /// any of the embedded polymorphic types can be progressed.
 
    ///
 
    /// `outer_expr_id` is the main expression we're progressing (e.g. a 
 
    /// function call), while `expr_id` is the embedded expression we're 
 
    /// matching against the signature. `expression_type` and 
 
    /// `expression_start_idx` belong to `expr_id`.
 
    fn apply_equal2_signature_constraint(
 
        ctx: &Ctx, outer_expr_id: ExpressionId, expr_id: Option<ExpressionId>,
 
        polymorph_data: &mut ExtraData, polymorph_progress: &mut HashSet<usize>,
 
        signature_type: *mut InferenceType, signature_start_idx: usize,
 
        expression_type: *mut InferenceType, expression_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError> {
 
        // Safety: cannot mutually infer the same types
 
        //         polymorph_data containers may not be modified
 
        debug_assert_ptrs_distinct!(signature_type, expression_type);
 

	
 
        // Infer the signature and expression type
 
        let infer_res = unsafe { 
 
            InferenceType::infer_subtrees_for_both_types(
 
                signature_type, signature_start_idx,
 
                expression_type, expression_start_idx
 
            ) 
 
        };
 

	
 
        if infer_res == DualInferenceResult::Incompatible {
 
            // TODO: Check if I still need to use this
 
            let outer_span = ctx.heap[outer_expr_id].span();
 
            let (span_name, span) = match expr_id {
 
                Some(expr_id) => ("argument's", ctx.heap[expr_id].span()),
 
                None => ("type's", outer_span)
 
            };
 
            let (signature_display_type, expression_display_type) = unsafe { (
 
                (&*signature_type).display_name(&ctx.heap),
 
                (&*expression_type).display_name(&ctx.heap)
 
            ) };
 

	
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module.source, outer_span,
 
                "failed to fully resolve the types of this expression"
 
            ).with_info_at_span(
 
                &ctx.module.source, span, format!(
 
                    "because the {} signature has been resolved to '{}', but the expression has been resolved to '{}'",
 
                    span_name, signature_display_type, expression_display_type
 
                )
 
            ));
 
        }
 

	
 
        // Try to see if we can progress any of the polymorphic variables
 
        let progress_sig = infer_res.modified_lhs();
 
        let progress_expr = infer_res.modified_rhs();
 

	
 
        if progress_sig {
 
            let signature_type = unsafe{&mut *signature_type};
 
            debug_assert!(
 
                signature_type.has_body_marker, 
 
                "made progress on signature type, but it doesn't have a marker"
 
            );
 
            for (poly_idx, poly_section) in signature_type.body_marker_iter() {
 
                let polymorph_type = &mut polymorph_data.poly_vars[poly_idx];
 
                match Self::apply_forced_constraint_types(
 
                    polymorph_type, 0, poly_section, 0
 
                ) {
 
                    Ok(true) => { polymorph_progress.insert(poly_idx); },
 
                    Ok(false) => {},
 
                    Err(()) => { return Err(Self::construct_poly_arg_error(ctx, polymorph_data, outer_expr_id))}
 
                }
 
            }
 
        }
 
        Ok((progress_sig, progress_expr))
 
    }
 

	
 
    /// Applies equal2 constraints on the signature type for each of the 
 
    /// polymorphic variables. If the signature type is progressed then we 
 
    /// progress the expression type as well.
 
    ///
 
    /// This function assumes that the polymorphic variables have already been
 
    /// progressed as far as possible by calling 
 
    /// `apply_equal2_signature_constraint`. As such, we expect to not encounter
 
    /// any errors.
 
    ///
 
    /// This function returns true if the expression's type has been progressed
 
    fn apply_equal2_polyvar_constraint(
 
        polymorph_data: &ExtraData, _polymorph_progress: &HashSet<usize>,
 
        signature_type: *mut InferenceType, expr_type: *mut InferenceType
 
    ) -> bool {
 
        // Safety: all pointers should be distinct
 
        //         polymorph_data containers may not be modified
 
        debug_assert_ptrs_distinct!(signature_type, expr_type);
 
        let signature_type = unsafe{&mut *signature_type};
 
        let expr_type = unsafe{&mut *expr_type};
 

	
 
        // Iterate through markers in signature type to try and make progress
 
        // on the polymorphic variable        
 
        let mut seek_idx = 0;
 
        let mut modified_sig = false;
 
        
 
        while let Some((poly_idx, start_idx)) = signature_type.find_body_marker(seek_idx) {
 
            let end_idx = InferenceType::find_subtree_end_idx(&signature_type.parts, start_idx);
 
            // if polymorph_progress.contains(&poly_idx) {
 
                // Need to match subtrees
 
                let polymorph_type = &polymorph_data.poly_vars[poly_idx];
 
                debug_log!("   - DEBUG: Applying {} to '{}' from '{}'", polymorph_type.display_name(heap), InferenceType::partial_display_name(heap, &signature_type.parts[start_idx..]), signature_type.display_name(heap));
 
                let modified_at_marker = Self::apply_forced_constraint_types(
 
                    signature_type, start_idx, 
 
                    &polymorph_type.parts, 0
 
                ).expect("no failure when applying polyvar constraints");
 

	
 
                modified_sig = modified_sig || modified_at_marker;
 
            // }
 

	
 
            seek_idx = end_idx;
 
        }
 

	
 
        // If we made any progress on the signature's type, then we also need to
 
        // apply it to the expression that is supposed to match the signature.
 
        if modified_sig {
 
            match InferenceType::infer_subtree_for_single_type(
 
                expr_type, 0, &signature_type.parts, 0
 
            ) {
 
                SingleInferenceResult::Modified => true,
 
                SingleInferenceResult::Unmodified => false,
 
                SingleInferenceResult::Incompatible =>
 
                    unreachable!("encountered failure while reapplying modified signature to expression after polyvar inference")
 
            }
 
        } else {
 
            false
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects all three provided types to be
 
    /// equal. In case we can make progress in inferring the types then we
 
    /// attempt to do so. If the call is successful then the composition of all
 
    /// types is made equal.
 
    fn apply_equal3_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId,
 
        start_idx: usize
 
    ) -> Result<(bool, bool, bool), ParseError> {
 
        // Safety: all expression IDs are always distinct, and we do not modify
 
        //  the container
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id, arg1_id, arg2_id);
 
        let expr_type: *mut _ = self.expr_types.get_mut(&expr_id).unwrap();
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let expr_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
 
        };
 
        if expr_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
 
        }
 

	
 
        let args_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) };
 
        if args_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        // If all types are compatible, but the second call caused the arg1_type
 
        // to be expanded, then we must also assign this to expr_type.
 
        let mut progress_expr = expr_res.modified_lhs();
 
        let mut progress_arg1 = expr_res.modified_rhs();
 
        let progress_arg2 = args_res.modified_rhs();
 

	
 
        if args_res.modified_lhs() { 
 
            unsafe {
 
                let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
 
                let subtree = &((*arg2_type).parts[start_idx..end_idx]);
 
                (*expr_type).replace_subtree(start_idx, subtree);
 
            }
 
            progress_expr = true;
 
            progress_arg1 = true;
 
        }
 

	
 
        Ok((progress_expr, progress_arg1, progress_arg2))
 
    }
 

	
 
    // TODO: @optimize Since we only deal with a single type this might be done
 
    //  a lot more efficiently, methinks (disregarding the allocations here)
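    /// Applies a type constraint expecting all argument expressions to have an
    /// equal type (used for e.g. array literal elements). Inference is applied
    /// pairwise from left to right, after which the most-inferred type is
    /// copied back onto the earlier arguments. Returns a progress flag per
    /// argument.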
 
    fn apply_equal_n_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
 
    ) -> Result<Vec<bool>, ParseError> {
 
        // Early exit
 
        match args.len() {
 
            0 => return Ok(vec!()),         // nothing to progress
 
            1 => return Ok(vec![false]),    // only one type, so nothing to infer
 
            _ => {}
 
        }
 

	
 
        let mut progress = Vec::new();
 
        progress.resize(args.len(), false);
 

	
 
        // Do pairwise inference, keep track of the last entry we made progress
 
        // on. Once done we need to update everything to the most-inferred type.
 
        let mut arg_iter = args.iter();
 
        let mut last_arg_id = *arg_iter.next().unwrap();
 
        let mut last_lhs_progressed = 0;
 
        let mut lhs_arg_idx = 0;
 

	
 
        while let Some(next_arg_id) = arg_iter.next() {
 
            let arg1_type: *mut _ = self.expr_types.get_mut(&last_arg_id).unwrap();
 
            let arg2_type: *mut _ = self.expr_types.get_mut(next_arg_id).unwrap();
 

	
 
            let res = unsafe {
 
                InferenceType::infer_subtrees_for_both_types(arg1_type, 0, arg2_type, 0)
 
            };
 

	
 
            if res == DualInferenceResult::Incompatible {
 
                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
 
            }
 

	
 
            if res.modified_lhs() {
 
                // We re-inferred something on the left hand side, so everything
 
                // up until now should be re-inferred.
 
                progress[lhs_arg_idx] = true;
 
                last_lhs_progressed = lhs_arg_idx;
 
            }
 
            progress[lhs_arg_idx + 1] = res.modified_rhs();
 

	
 
            last_arg_id = *next_arg_id;
 
            lhs_arg_idx += 1;
 
        }
 

	
 
        // Re-infer everything. Note that we do not need to re-infer the type
 
        // exactly at `last_lhs_progressed`, but only everything up to it.
 
        let last_type: *mut _ = self.expr_types.get_mut(args.last().unwrap()).unwrap();
 
        for arg_idx in 0..last_lhs_progressed {
 
            let arg_type: *mut _ = self.expr_types.get_mut(&args[arg_idx]).unwrap();
 
            unsafe{
 
                (*arg_type).replace_subtree(0, &(*last_type).parts);
 
            }
 
            progress[arg_idx] = true;
 
        }
 

	
 
        Ok(progress)
 
    }
 

	
 
    /// Determines the `InferenceType` for the expression based on the
 
    /// expression parent. Note that if the parent is another expression, we do
 
    /// not take special action, instead we let parent expressions fix the type
 
    /// of subexpressions before they have a chance to call this function.
 
    fn insert_initial_expr_inference_type(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId
 
    ) -> Result<(), ParseError> {
 
        use ExpressionParent as EP;
 
        use InferenceTypePart as ITP;
 

	
 
        let expr = &ctx.heap[expr_id];
 
        let inference_type = match expr.parent() {
 
            EP::None =>
 
                // Should have been set by linker
 
                unreachable!(),
 
            EP::ExpressionStmt(_) | EP::Expression(_, _) =>
 
                // Determined during type inference
 
                InferenceType::new(false, false, vec![ITP::Unknown]),
 
            EP::If(_) | EP::While(_) =>
 
                // Must be a boolean
 
                InferenceType::new(false, true, vec![ITP::Bool]),
 
            EP::Return(_) =>
 
                // Must match the return type of the function
 
                if let DefinitionType::Function(func_id) = self.definition_type {
 
                    debug_assert_eq!(ctx.heap[func_id].return_types.len(), 1);
 
                    let returned = &ctx.heap[func_id].return_types[0];
 
                    self.determine_inference_type_from_parser_type_elements(&returned.elements, true)
 
                } else {
 
                    // Cannot happen: definition always set upon body traversal
 
                    // and "return" calls in components are illegal.
 
                    unreachable!();
 
                },
 
            EP::New(_) =>
 
                // Must be a component call, which we assign a "Void" return
 
                // type
 
                InferenceType::new(false, true, vec![ITP::Void]),
 
        };
 

	
 
        match self.expr_types.entry(expr_id) {
 
            Entry::Vacant(vacant) => {
 
                vacant.insert(inference_type);
 
            },
 
            Entry::Occupied(mut preexisting) => {
 
                // We already have an entry, this happens if our parent fixed
 
                // our type (e.g. we're used in a conditional expression's test)
 
                // but we have a different type.
 
                // TODO: Is this ever called? Seems like it can't
 
                debug_assert!(false, "I am actually called, my ID is {}", expr_id.index);
 
                let old_type = preexisting.get_mut();
 
                if let SingleInferenceResult::Incompatible = InferenceType::infer_subtree_for_single_type(
 
                    old_type, 0, &inference_type.parts, 0
 
                ) {
 
                    return Err(self.construct_expr_type_error(ctx, expr_id, expr_id))
 
                }
 
            }
 
        }
 

	
 
        Ok(())
 
    }
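    /// Sets up the `ExtraData` entry for a call expression: inference types
    /// for the call's polymorphic arguments, one embedded type per parameter
    /// of the called function or component, and the return type (a component
    /// "returns" `Void`).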
 

	
 
    fn insert_initial_call_polymorph_data(
 
        &mut self, ctx: &mut Ctx, call_id: CallExpressionId
 
    ) {
 
        // Note: the polymorph variables may be partially specified and may
 
        // contain references to the wrapping definition's (i.e. the proctype
 
        // we are currently visiting) polymorphic arguments.
 
        //
 
        // The arguments of the call may refer to polymorphic variables in the
 
        // definition of the function we're calling, not of the wrapping
 
        // definition. We insert markers in these inferred types to be able to
 
        // map them back and forth to the polymorphic arguments of the function
 
        // we are calling.
 
        let call = &ctx.heap[call_id];
 

	
 
        // Handle the polymorphic arguments (if there are any)
 
        let num_poly_args = call.parser_type.elements[0].variant.num_embedded();
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 
        for embedded_elements in call.parser_type.iter_embedded(0) {
 
            poly_args.push(self.determine_inference_type_from_parser_type_elements(embedded_elements, true));
 
        }
 

	
 
        // Handle the arguments and return types
 
        let definition = &ctx.heap[call.definition];
 
        let (parameters, returned) = match definition {
 
            Definition::Component(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, None)
 
            },
 
            Definition::Function(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, Some(&definition.return_types))
 
            },
 
            Definition::Struct(_) | Definition::Enum(_) | Definition::Union(_) => {
 
                unreachable!("insert_initial_call_polymorph data for non-procedure type");
 
            },
 
        };
 

	
 
        let mut parameter_types = Vec::with_capacity(parameters.len());
 
        for parameter_id in parameters.clone().into_iter() { // TODO: @Performance
 
            let param = &ctx.heap[parameter_id];
 
            parameter_types.push(self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, false));
 
        }
 

	
 
        let return_type = match returned {
 
            None => {
 
                // Component, so returns a "Void"
 
                InferenceType::new(false, true, vec![InferenceTypePart::Void])
 
            },
 
            Some(returned) => {
 
                debug_assert_eq!(returned.len(), 1);
 
                let returned = &returned[0];
 
                self.determine_inference_type_from_parser_type_elements(&returned.elements, false)
 
            }
 
        };
 

	
 
        self.extra_data.insert(call_id.upcast(), ExtraData {
 
            poly_vars: poly_args,
 
            embedded: parameter_types,
 
            returned: return_type
 
        });
 
    }
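    /// Sets up the `ExtraData` entry for a struct literal: inference types for
    /// the polymorphic arguments, one embedded type per field in the order the
    /// literal specifies them, and the struct type itself (with body markers
    /// for the polymorphic arguments) as the returned type.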
 

	
 
    fn insert_initial_struct_polymorph_data(
 
        &mut self, ctx: &mut Ctx, lit_id: LiteralExpressionId,
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_struct();
 

	
 
        // Handle polymorphic arguments
 
        let num_embedded = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_embedded);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle parser types on struct definition
 
        let defined_type = ctx.types.get_base_definition(&literal.definition).unwrap();
 
        let struct_type = defined_type.definition.as_struct();
 
        debug_assert_eq!(poly_args.len(), defined_type.poly_vars.len());
 

	
 
        // Note: programmer is capable of specifying fields in a struct literal
 
        // in a different order than on the definition. We take the literal-
 
        // specified order to be leading.
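        // For a hypothetical `struct Point{ u32 x, u32 y }`, the literal
        // `Point{ y: 2, x: 1 }` therefore stores its embedded types in
        // y-then-x order, with `field_idx` mapping each literal field back
        // onto the definition.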
 
        let mut embedded_types = Vec::with_capacity(struct_type.fields.len());
 
        for lit_field in literal.fields.iter() {
 
            let def_field = &struct_type.fields[lit_field.field_idx];
 
            let inference_type = self.determine_inference_type_from_parser_type_elements(&def_field.parser_type.elements, false);
 
            embedded_types.push(inference_type);
 
        }
 

	
 
        // Return type is the struct type itself, with the appropriate 
 
        // polymorphic variables. So:
 
        // - 1 part for definition
 
        // - N_poly_arg marker parts for each polymorphic argument
 
        // - all the parts for the currently known polymorphic arguments 
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len()));
 
        let mut return_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { return_type_done = false; }
 

	
 
            parts.push(ITP::MarkerBody(poly_var_idx));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let return_type = InferenceType::new(!poly_args.is_empty(), return_type_done, parts);
 

	
 
        self.extra_data.insert(lit_id.upcast(), ExtraData{
 
            poly_vars: poly_args,
 
            embedded: embedded_types,
 
            returned: return_type,
 
        });
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for enum expressions. These
 
    /// can never be determined from the enum itself, but may be inferred from
 
    /// the use of the enum.
 
    fn insert_initial_enum_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_enum();
 

	
 
        // Handle polymorphic arguments to the enum
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle enum type itself
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len()));
 
        let mut enum_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { enum_type_done = false; }
 

	
 
            parts.push(ITP::MarkerBody(poly_var_idx));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let enum_type = InferenceType::new(!poly_args.is_empty(), enum_type_done, parts);
 

	
 
        self.extra_data.insert(lit_id.upcast(), ExtraData{
 
            poly_vars: poly_args,
 
            embedded: Vec::new(),
 
            returned: enum_type,
 
        });
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for unions. The polymorphic
 
    /// arguments may be partially determined from embedded values in the union.
 
    fn insert_initial_union_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_union();
 

	
 
        // Construct the polymorphic variables
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle any of the embedded values in the variant, if specified
 
        let definition_id = literal.definition;
 
        let type_definition = ctx.types.get_base_definition(&definition_id).unwrap();
 
        let union_definition = type_definition.definition.as_union();
 
        debug_assert_eq!(poly_args.len(), type_definition.poly_vars.len());
 

	
 
        let variant_definition = &union_definition.variants[literal.variant_idx];
 
        debug_assert_eq!(variant_definition.embedded.len(), literal.values.len());
src/protocol/tests/eval_calls.rs
 
use super::*;
 
use crate::protocol::eval::*;
 

	
 
#[test]
 
fn test_function_call() {
 
    Tester::new_single_source_expect_ok("with literal arg", "
 
    func add_two(u32 value) -> u32 {
 
        return value + 2;
 
    }
 
    func foo() -> u32 {
 
        return add_two(5);
 
    }
 
    ").for_function("foo", |f| {
 
        f.call(Some(Value::UInt32(7)));
 
    });
 

	
 
    println!("\n\n\n\n\n\n\n");
 

	
 
    Tester::new_single_source_expect_ok("with variable arg", "
 
    func add_two(u32 value) -> u32 {
 
        value += 1;
 
        return value + 1;
 
    }
 
    func foo() -> bool {
 
        auto initial = 5;
 
        auto result = add_two(initial);
 
        return initial == 5 && result == 7;
 
    }").for_function("foo", |f| {
 
        f.call(Some(Value::Bool(true)));
 
    });
 
}
 
\ No newline at end of file
src/protocol/tests/eval_operators.rs
 
use super::*;
 
use crate::protocol::eval::*;
 

	
 
#[test]
 
fn test_assignment_operators() {
 
    fn construct_source(value_type: &str, value_initial: &str, value_op: &str) -> String {
 
        return format!(
 
            "func foo() -> {} {{
 
                {} value = {};
 
                value {};
 
                return value;
 
            }}",
 
            value_type, value_type, value_initial, value_op
 
        );
 
    }
 

	
 
    fn perform_test(name: &str, source: String, expected_value: Value) {
 
        Tester::new_single_source_expect_ok(name, source)
 
            .for_function("foo", move |f| {
 
                f.call(Some(expected_value));
 
            });
 
    }
 

	
 
    perform_test(
 
        "set",
 
        construct_source("u32", "1", "= 5"),
 
        Value::UInt32(5)
 
    );
 

	
 
    perform_test(
 
        "multiplied",
 
        construct_source("u32", "2", "*= 4"),
 
        Value::UInt32(8)
 
    );
 

	
 
    perform_test(
 
        "divided",
 
        construct_source("u32", "8", "/= 4"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "remained",
 
        construct_source("u32", "8", "%= 3"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "added",
 
        construct_source("u32", "2", "+= 4"),
 
        Value::UInt32(6)
 
    );
 

	
 
    perform_test(
 
        "subtracted",
 
        construct_source("u32", "6", "-= 4"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "shifted left",
 
        construct_source("u32", "2", "<<= 2"),
 
        Value::UInt32(8)
 
    );
 

	
 
    perform_test(
 
        "shifted right",
 
        construct_source("u32", "8", ">>= 2"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "bitwise and",
 
        construct_source("u32", "15", "&= 35"),
 
        Value::UInt32(3)
 
    );
 

	
 
    perform_test(
 
        "bitwise xor",
 
        construct_source("u32", "3", "^= 7"),
 
        Value::UInt32(4)
 
    );
 

	
 
    perform_test(
 
        "bitwise or",
 
        construct_source("u32", "12", "|= 3"),
 
        Value::UInt32(15)
 
    );
 
}
 

	
 
#[test]
 
fn test_concatenate_operator() {
 
    Tester::new_single_source_expect_ok(
 
        "concatenate and check pairs",
 
        "
 
        func check_pair<T>(T[] arr, u32 idx) -> bool {
 
            return arr[idx] == arr[idx + 1];
 
        }
 

	
 
        struct Point2D {
 
            u32 x,
 
            u32 y,
 
        }
 

	
 
        func create_point(u32 x, u32 y) -> Point2D {
 
            return Point2D{ x: x, y: y };
 
        }
 

	
 
        func create_array() -> Point2D[] {
 
            return {
 
                create_point(1, 2),
 
                create_point(1, 2),
 
                create_point(3, 4),
 
                create_point(3, 4)
 
            };
 
        }
 

	
 
        func foo() -> bool {
 
            auto lhs = create_array();
 
            auto rhs = create_array();
 
            auto total = lhs @ rhs;
 
            auto is_equal =
 
                check_pair(total, 0) &&
 
                check_pair(total, 2) &&
 
                check_pair(total, 4) &&
 
                check_pair(total, 6);
 
            auto is_not_equal =
 
                !check_pair(total, 0) ||
 
                !check_pair(total, 2) ||
 
                !check_pair(total, 4) ||
 
                !check_pair(total, 6);
 
            return is_equal && !is_not_equal;
 
        }
 
        "
 
    ).for_function("foo", |f| {
 
        f.call(Some(Value::Bool(true)));
 
    });
 
}
 
#[test]
 
fn test_binary_integer_operators() {
 
    fn construct_source(value_type: &str, code: &str) -> String {
 
        format!("
 
        func foo() -> {} {{
 
            {}
 
        }}
 
        ", value_type, code)
 
    }
 

	
 
    fn perform_test(test_name: &str, value_type: &str, code: &str, expected_value: Value) {
 
        Tester::new_single_source_expect_ok(test_name, construct_source(value_type, code))
 
            .for_function("foo", move |f| {
 
                f.call(Some(expected_value));
 
            });
 
    }
 

	
 
    perform_test(
 
        "bitwise_or", "u16",
 
        "auto a = 3; return a | 4;", Value::UInt16(7)
 
    );
 
    perform_test(
 
        "bitwise_xor", "u16",
 
        "auto a = 3; return a ^ 7;", Value::UInt16(4)
 
    );
 
    perform_test(
 
        "bitwise and", "u16",
 
        "auto a = 0b110011; return a & 0b011110;", Value::UInt16(0b010010)
 
    );
 
    perform_test(
 
        "shift left", "u16",
 
        "auto a = 0x0F; return a << 4;", Value::UInt16(0xF0)
 
    );
 
    perform_test(
 
        "shift right", "u64",
 
        "auto a = 0xF0; return a >> 4;", Value::UInt64(0x0F)
 
    );
 
    perform_test(
 
        "add", "u32",
 
        "auto a = 5; return a + 5;", Value::UInt32(10)
 
    );
 
    perform_test(
 
        "subtract", "u32",
 
        "auto a = 3; return a - 3;", Value::UInt32(0)
 
    );
 
    perform_test(
 
        "multiply", "u8",
 
        "auto a = 2 * 2; return a * 2 * 2;", Value::UInt8(16)
 
    );
 
    perform_test(
 
        "divide", "u8",
 
        "auto a = 32 / 2; return a / 2 / 2;", Value::UInt8(4)
 
    );
 
    perform_test(
 
        "remainder", "u16",
 
        "auto a = 29; return a % 3;", Value::UInt16(2)
 
    );
 
}
 
\ No newline at end of file
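The construct_source/perform_test helpers above template a protocol snippet into a foo() body and run it through Tester::new_single_source_expect_ok plus for_function, so each operator case stays a single call. As a sketch of how one more unsigned case could look, written against only the harness calls visible in this diff (the test name, snippet and literals below are illustrative and not part of the changeset):

    perform_test(
        "shift left then or", "u32",
        "auto a = 1 << 4; return a | 1;", Value::UInt32(17)
    );

Any further case only has to keep the declared return type and the expected Value variant in agreement (u32 with Value::UInt32, and so on).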
src/protocol/tests/eval_silly.rs
Show inline comments
 
new file 100644
 
use super::*;
 

	
 
#[test]
 
fn test_concatenate_operator() {
 
    Tester::new_single_source_expect_ok(
 
        "concatenate and check values",
 
        "
 
        // To see if we accept the polymorphic arg
 
        func check_pair<T>(T[] arr, u32 idx) -> bool {
 
            return arr[idx] == arr[idx + 1];
 
        }
 

	
 
        // To see if we can check fields of polymorphs
 
        func check_values<T>(T[] arr, u32 idx, u32 x, u32 y) -> bool {
 
            auto value = arr[idx];
 
            return value.x == x && value.y == y;
 
        }
 

	
 
        struct Point2D {
 
            u32 x,
 
            u32 y,
 
        }
 

	
 
        // Could do this inline, but we're attempting to stress the system a bit
 
        func create_point(u32 x, u32 y) -> Point2D {
 
            return Point2D{ x: x, y: y };
 
        }
 

	
 
        // Again, more stressing: returning a heap-allocated thing
 
        func create_array() -> Point2D[] {
 
            return {
 
                create_point(1, 2),
 
                create_point(1, 2),
 
                create_point(3, 4),
 
                create_point(3, 4)
 
            };
 
        }
 

	
 
        // The silly checkamajig
 
        func foo() -> bool {
 
            auto lhs = create_array();
 
            auto rhs = create_array();
 
            auto total = lhs @ rhs;
 
            auto is_equal =
 
                check_pair(total, 0) &&
 
                check_pair(total, 2) &&
 
                check_pair(total, 4) &&
 
                check_pair(total, 6);
 
            auto is_not_equal =
 
                !check_pair(total, 0) ||
 
                !check_pair(total, 2) ||
 
                !check_pair(total, 4) ||
 
                !check_pair(total, 6);
 
            auto has_correct_fields =
 
                check_values(total, 3, 3, 4) &&
 
                check_values(total, 4, 1, 2);
 
            return is_equal && !is_not_equal && has_correct_fields;
 
        }
 
        "
 
    ).for_function("foo", |f| {
 
        f.call(Some(Value::Bool(true)));
 
    });
 
}
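In foo above, `@` is array concatenation: total holds the four points of lhs followed by the four points of rhs, which is why the pair checks at indices 0, 2, 4 and 6 all hold and why check_values finds (3, 4) at index 3 and (1, 2) at index 4. A sketch of a more direct case on a primitive element type, reusing only constructs already present in this file (the test name, helper functions and literals are illustrative, not part of the changeset, and whether this exact snippet type-checks is an assumption):

    #[test]
    fn test_concatenate_primitive_arrays() {
        Tester::new_single_source_expect_ok(
            "concatenate u32 arrays",
            "
            func left() -> u32[] { return { 1, 2 }; }
            func right() -> u32[] { return { 3 }; }

            func foo() -> u32 {
                auto total = left() @ right();
                return total[0] + total[1] + total[2];
            }
            "
        ).for_function("foo", |f| {
            // 1 + 2 + 3 from the concatenated array
            f.call(Some(Value::UInt32(6)));
        });
    }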
 

	
 
#[test]
 
fn test_struct_fields() {
 
    Tester::new_single_source_expect_ok("struct field access",
 
"
 
struct Nester<T> {
 
    T v,
 
}
 

	
 
func make<T>(T inner) -> Nester<T> {
 
    return Nester{ v: inner };
 
}
 

	
 
func modify<T>(Nester<T> outer, T inner) -> Nester<T> {
 
    outer.v = inner;
 
    return outer;
 
}
 

	
 
func foo() -> bool {
 
    // Single depth modification
 
    auto original1 = make<u32>(5);
 
    auto modified1 = modify(original1, 2);
 
    auto success1 = original1.v == 5 && modified1.v == 2;
 

	
 
    // Multiple levels of modification
 
    auto original2 = make(make(make(make(true))));
 
    auto modified2 = modify(original2.v, make(make(false))); // strip one Nester level
 
    auto success2 = original2.v.v.v.v == true && modified2.v.v.v == false;
 

	
 
    return success1 && success2;
 
}
 
").for_function("foo", |f| {
 
        f.call(Some(Value::Bool(true)));
 
    });
 
}
 
\ No newline at end of file
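test_struct_fields checks value semantics as much as field access: modify receives its Nester<T> by value, so original1 and original2 keep their old contents after the calls. A sketch of how one more nesting depth could be covered with the same harness and the same helper definitions (the test name, body and expected values are illustrative additions, not part of the changeset):

    #[test]
    fn test_nested_field_assignment() {
        Tester::new_single_source_expect_ok("nested field assignment",
    "
    struct Nester<T> {
        T v,
    }

    func make<T>(T inner) -> Nester<T> {
        return Nester{ v: inner };
    }

    func modify<T>(Nester<T> outer, T inner) -> Nester<T> {
        outer.v = inner;
        return outer;
    }

    func foo() -> bool {
        auto original = make(make<u32>(7));
        auto modified = modify(original, make<u32>(8)); // replace the inner Nester
        return original.v.v == 7 && modified.v.v == 8;
    }
    ").for_function("foo", |f| {
            f.call(Some(Value::Bool(true)));
        });
    }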
src/protocol/tests/mod.rs
Show inline comments
 
/**
 
 * protocol/tests/mod.rs
 
 *
 
 * Contains tests for various parts of the lexer/parser and for the evaluator.
 
 * These are intended as temporary tests to make sure that we do not break
 
 * existing functionality.
 
 *
 
 * In the future these should be replaced by proper testing protocols.
 
 */
 

	
 
mod utils;
 
mod lexer;
 
mod parser_validation;
 
mod parser_inference;
 
mod parser_monomorphs;
 
mod parser_imports;
 
mod eval_operators;
 
mod eval_calls;
 
mod eval_silly;
 

	
 
pub(crate) use utils::{Tester};
 
\ No newline at end of file
 
pub(crate) use utils::{Tester}; // the testing harness
 
pub(crate) use crate::protocol::eval::value::*; // to test functions
 
\ No newline at end of file
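With mod.rs now re-exporting both the Tester harness and the evaluator's Value types, a new evaluator test file only needs a module declaration here and a `use super::*;` at its top, as eval_silly.rs does. A sketch with a hypothetical file name (eval_extra.rs) and test, not part of the changeset:

    // src/protocol/tests/mod.rs
    mod eval_extra; // hypothetical additional evaluator tests

    // src/protocol/tests/eval_extra.rs
    use super::*; // Tester harness and eval Value types, re-exported above

    #[test]
    fn test_logical_not() {
        Tester::new_single_source_expect_ok(
            "logical not",
            "func foo() -> bool { auto a = false; return !a; }"
        ).for_function("foo", |f| {
            f.call(Some(Value::Bool(true)));
        });
    }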

Changeset was too big and was cut off...

0 comments (0 inline, 0 general)