Changeset - c79f28079eb9
[Not reviewed]
MH - 2021-07-27 13:37:59
contact@maxhenger.nl
make all tests work again
3 files changed with 13 insertions and 10 deletions:
0 comments (0 inline, 0 general)
src/protocol/parser/pass_typing.rs
 
@@ -2769,1167 +2769,1170 @@ impl PassTyping {
 
            )
 
        }
 
    }
 

	
 
    fn apply_template_constraint_to_types(
 
        to_infer: *mut InferenceType, to_infer_start_idx: usize,
 
        template: &[InferenceTypePart], template_start_idx: usize
 
    ) -> Result<bool, ()> {
 
        match InferenceType::infer_subtree_for_single_type(
 
            unsafe{ &mut *to_infer }, to_infer_start_idx,
 
            template, template_start_idx, false
 
        ) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(()),
 
        }
 
    }
 

	
 
    /// Applies a forced constraint: the supplied expression's type MUST be
 
    /// inferred from the template, the other way around is considered invalid.
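    // Illustrative sketch (not part of the original source): with a forced
    // constraint the template acts as a one-way bound. Assuming the part
    // encoding used throughout this pass, a template of [Bool] progresses an
    // expression type of [Unknown] to [Bool], while an expression type of
    // [UInt8] yields SingleInferenceResult::Incompatible; the template itself
    // is never widened to accommodate the expression.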
 
    fn apply_forced_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> Result<bool, ParseError> {
 
        let expr_idx = ctx.heap[expr_id].get_unique_id_in_definition();
 
        let expr_type = &mut self.expr_types[expr_idx as usize].expr_type;
 
        match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0, true) {
 
            SingleInferenceResult::Modified => Ok(true),
 
            SingleInferenceResult::Unmodified => Ok(false),
 
            SingleInferenceResult::Incompatible => Err(
 
                self.construct_template_type_error(ctx, expr_id, template)
 
            )
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects the two provided types to be
 
    /// equal. We attempt to make progress in inferring the types. If the call
 
    /// is successful then the composition of all types is made equal.
 
    /// The "parent" `expr_id` is provided to construct errors.
 
    fn apply_equal2_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg1_start_idx: usize,
 
        arg2_id: ExpressionId, arg2_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError> {
 
        let arg1_expr_idx = ctx.heap[arg1_id].get_unique_id_in_definition(); // TODO: @Temp
 
        let arg2_expr_idx = ctx.heap[arg2_id].get_unique_id_in_definition();
 
        let arg1_type: *mut _ = &mut self.expr_types[arg1_expr_idx as usize].expr_type;
 
        let arg2_type: *mut _ = &mut self.expr_types[arg2_expr_idx as usize].expr_type;
 

	
 
        let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types(
 
            arg1_type, arg1_start_idx,
 
            arg2_type, arg2_start_idx
 
        ) };
 
        if infer_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        Ok((infer_res.modified_lhs(), infer_res.modified_rhs()))
 
    }
 

	
 
    /// Applies an equal2 constraint between a signature type (e.g. a function
 
    /// argument or struct field) and an expression whose type should match that
 
    /// signature. If we make progress on the signature, then we try to see if
 
    /// any of the embedded polymorphic types can be progressed.
 
    ///
 
    /// `outer_expr_id` is the main expression we're progressing (e.g. a 
 
    /// function call), while `expr_id` is the embedded expression we're 
 
    /// matching against the signature. `expression_type` and 
 
    /// `expression_start_idx` belong to `expr_id`.
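    // Illustrative sketch (not part of the original source): assuming the
    // part encoding used by this pass, a parameter of type `T[]` in a called
    // function's signature is encoded roughly as [Array, Marker(0), Unknown].
    // If the matching argument is already known to be [Array, UInt32], dual
    // inference progresses the signature to [Array, Marker(0), UInt32], and
    // the section behind Marker(0) is then fed into
    // polymorph_data.poly_vars[0] via apply_template_constraint_to_types.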
 
    fn apply_equal2_signature_constraint(
 
        ctx: &Ctx, outer_expr_id: ExpressionId, expr_id: Option<ExpressionId>,
 
        polymorph_data: &mut ExtraData, polymorph_progress: &mut HashSet<u32>,
 
        signature_type: *mut InferenceType, signature_start_idx: usize,
 
        expression_type: *mut InferenceType, expression_start_idx: usize
 
    ) -> Result<(bool, bool), ParseError> {
 
        // Safety: all pointers distinct
 

	
 
        // Infer the signature and expression type
 
        let infer_res = unsafe { 
 
            InferenceType::infer_subtrees_for_both_types(
 
                signature_type, signature_start_idx,
 
                expression_type, expression_start_idx
 
            ) 
 
        };
 

	
 
        if infer_res == DualInferenceResult::Incompatible {
 
            // TODO: Check if I still need to use this
 
            let outer_span = ctx.heap[outer_expr_id].full_span();
 
            let (span_name, span) = match expr_id {
 
                Some(expr_id) => ("argument's", ctx.heap[expr_id].full_span()),
 
                None => ("type's", outer_span)
 
            };
 
            let (signature_display_type, expression_display_type) = unsafe { (
 
                (&*signature_type).display_name(&ctx.heap),
 
                (&*expression_type).display_name(&ctx.heap)
 
            ) };
 

	
 
            return Err(ParseError::new_error_str_at_span(
 
                &ctx.module().source, outer_span,
 
                "failed to fully resolve the types of this expression"
 
            ).with_info_at_span(
 
                &ctx.module().source, span, format!(
 
                    "because the {} signature has been resolved to '{}', but the expression has been resolved to '{}'",
 
                    span_name, signature_display_type, expression_display_type
 
                )
 
            ));
 
        }
 

	
 
        // Try to see if we can progress any of the polymorphic variables
 
        let progress_sig = infer_res.modified_lhs();
 
        let progress_expr = infer_res.modified_rhs();
 

	
 
        if progress_sig {
 
            let signature_type = unsafe{&mut *signature_type};
 
            debug_assert!(
 
                signature_type.has_marker,
 
                "made progress on signature type, but it doesn't have a marker"
 
            );
 
            for (poly_idx, poly_section) in signature_type.marker_iter() {
 
                let polymorph_type = &mut polymorph_data.poly_vars[poly_idx as usize];
 
                match Self::apply_template_constraint_to_types(
 
                    polymorph_type, 0, poly_section, 0
 
                ) {
 
                    Ok(true) => { polymorph_progress.insert(poly_idx); },
 
                    Ok(false) => {},
 
                    Err(()) => { return Err(Self::construct_poly_arg_error(ctx, polymorph_data, outer_expr_id))}
 
                }
 
            }
 
        }
 
        Ok((progress_sig, progress_expr))
 
    }
 

	
 
    /// Applies equal2 constraints on the signature type for each of the 
 
    /// polymorphic variables. If the signature type is progressed then we 
 
    /// progress the expression type as well.
 
    ///
 
    /// This function assumes that the polymorphic variables have already been
 
    /// progressed as far as possible by calling 
 
    /// `apply_equal2_signature_constraint`. As such, we expect to not encounter
 
    /// any errors.
 
    ///
 
    /// This function returns true if the expression's type has been progressed
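    // Illustrative sketch (not part of the original source): once a
    // polymorphic variable is known to be, say, [UInt32], every section
    // behind its Marker in the signature type is overwritten with UInt32,
    // and the updated signature is then re-applied to the expression type
    // that has to match it.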
 
    fn apply_equal2_polyvar_constraint(
 
        polymorph_data: &ExtraData, _polymorph_progress: &HashSet<u32>,
 
        signature_type: *mut InferenceType, expr_type: *mut InferenceType
 
    ) -> bool {
 
        // Safety: all pointers should be distinct
 
        //         polymorph_data containers may not be modified
 
        let signature_type = unsafe{&mut *signature_type};
 
        let expr_type = unsafe{&mut *expr_type};
 

	
 
        // Iterate through markers in signature type to try and make progress
 
        // on the polymorphic variable        
 
        let mut seek_idx = 0;
 
        let mut modified_sig = false;
 
        
 
        while let Some((poly_idx, start_idx)) = signature_type.find_marker(seek_idx) {
 
            let end_idx = InferenceType::find_subtree_end_idx(&signature_type.parts, start_idx);
 
            // if polymorph_progress.contains(&poly_idx) {
 
                // Need to match subtrees
 
                let polymorph_type = &polymorph_data.poly_vars[poly_idx as usize];
 
                let modified_at_marker = Self::apply_template_constraint_to_types(
 
                    signature_type, start_idx, 
 
                    &polymorph_type.parts, 0
 
                ).expect("no failure when applying polyvar constraints");
 

	
 
                modified_sig = modified_sig || modified_at_marker;
 
            // }
 

	
 
            seek_idx = end_idx;
 
        }
 

	
 
        // If we made any progress on the signature's type, then we also need to
 
        // apply it to the expression that is supposed to match the signature.
 
        if modified_sig {
 
            match InferenceType::infer_subtree_for_single_type(
 
                expr_type, 0, &signature_type.parts, 0, true
 
            ) {
 
                SingleInferenceResult::Modified => true,
 
                SingleInferenceResult::Unmodified => false,
 
                SingleInferenceResult::Incompatible =>
 
                    unreachable!("encountered failure while reapplying modified signature to expression after polyvar inference")
 
            }
 
        } else {
 
            false
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects all three provided types to be
 
    /// equal. In case we can make progress in inferring the types then we
 
    /// attempt to do so. If the call is successful then the composition of all
 
    /// types is made equal.
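    // Illustrative sketch (not part of the original source): this is the
    // shape of constraint a conditional `test ? a : b` would need, where the
    // expression itself and both branch arguments must end up with the same
    // type. Inference below runs pairwise (expr vs arg1, then arg1 vs arg2);
    // if the second pass widened arg1, the resulting subtree is copied back
    // into the expression type so all three stay identical.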
 
    fn apply_equal3_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId,
 
        start_idx: usize
 
    ) -> Result<(bool, bool, bool), ParseError> {
 
        // Safety: all pointers are unique
 
        //         containers may not be modified
 
        let expr_expr_idx = ctx.heap[expr_id].get_unique_id_in_definition(); // TODO: @Temp
 
        let arg1_expr_idx = ctx.heap[arg1_id].get_unique_id_in_definition();
 
        let arg2_expr_idx = ctx.heap[arg2_id].get_unique_id_in_definition();
 

	
 
        let expr_type: *mut _ = &mut self.expr_types[expr_expr_idx as usize].expr_type;
 
        let arg1_type: *mut _ = &mut self.expr_types[arg1_expr_idx as usize].expr_type;
 
        let arg2_type: *mut _ = &mut self.expr_types[arg2_expr_idx as usize].expr_type;
 

	
 
        let expr_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
 
        };
 
        if expr_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
 
        }
 

	
 
        let args_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) };
 
        if args_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        // If all types are compatible, but the second call caused the arg1_type
 
        // to be expanded, then we must also assign this to expr_type.
 
        let mut progress_expr = expr_res.modified_lhs();
 
        let mut progress_arg1 = expr_res.modified_rhs();
 
        let progress_arg2 = args_res.modified_rhs();
 

	
 
        if args_res.modified_lhs() { 
 
            unsafe {
 
                let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
 
                let subtree = &((*arg2_type).parts[start_idx..end_idx]);
 
                (*expr_type).replace_subtree(start_idx, subtree);
 
            }
 
            progress_expr = true;
 
            progress_arg1 = true;
 
        }
 

	
 
        Ok((progress_expr, progress_arg1, progress_arg2))
 
    }
 

	
 
    // TODO: @optimize Since we only deal with a single type this might be done
 
    //  a lot more efficiently, methinks (disregarding the allocations here)
 
    fn apply_equal_n_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
 
    ) -> Result<Vec<bool>, ParseError> {
 
        // Early exit
 
        match args.len() {
 
            0 => return Ok(vec!()),         // nothing to progress
 
            1 => return Ok(vec![false]),    // only one type, so nothing to infer
 
            _ => {}
 
        }
 

	
 
        let mut progress = Vec::new();
 
        progress.resize(args.len(), false);
 

	
 
        // Do pairwise inference, keep track of the last entry we made progress
 
        // on. Once done we need to update everything to the most-inferred type.
 
        let mut arg_iter = args.iter();
 
        let mut last_arg_id = *arg_iter.next().unwrap();
 
        let mut last_lhs_progressed = 0;
 
        let mut lhs_arg_idx = 0;
 

	
 
        while let Some(next_arg_id) = arg_iter.next() {
 
            let last_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition(); // TODO: @Temp
 
            let next_expr_idx = ctx.heap[*next_arg_id].get_unique_id_in_definition();
 
            let last_type: *mut _ = &mut self.expr_types[last_expr_idx as usize].expr_type;
 
            let next_type: *mut _ = &mut self.expr_types[next_expr_idx as usize].expr_type;
 

	
 
            let res = unsafe {
 
                InferenceType::infer_subtrees_for_both_types(last_type, 0, next_type, 0)
 
            };
 

	
 
            if res == DualInferenceResult::Incompatible {
 
                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
 
            }
 

	
 
            if res.modified_lhs() {
 
                // We re-inferred something on the left hand side, so everything
 
                // up until now should be re-inferred.
 
                progress[lhs_arg_idx] = true;
 
                last_lhs_progressed = lhs_arg_idx;
 
            }
 
            progress[lhs_arg_idx + 1] = res.modified_rhs();
 

	
 
            last_arg_id = *next_arg_id;
 
            lhs_arg_idx += 1;
 
        }
 

	
 
        // Re-infer everything. Note that we do not need to re-infer the type
 
        // exactly at `last_lhs_progressed`, but only everything up to it.
 
        let last_arg_expr_idx = ctx.heap[*args.last().unwrap()].get_unique_id_in_definition();
 
        let last_type: *mut _ = &mut self.expr_types[last_arg_expr_idx as usize].expr_type;
 
        for arg_idx in 0..last_lhs_progressed {
 
            let other_arg_expr_idx = ctx.heap[args[arg_idx]].get_unique_id_in_definition();
 
            let arg_type: *mut _ = &mut self.expr_types[other_arg_expr_idx as usize].expr_type;
 
            unsafe{
 
                (*arg_type).replace_subtree(0, &(*last_type).parts);
 
            }
 
            progress[arg_idx] = true;
 
        }
 

	
 
        Ok(progress)
 
    }
 

	
 
    /// Determines the `InferenceType` for the expression based on the
 
    /// expression parent. Note that if the parent is another expression, we do
 
    /// not take special action; instead we let parent expressions fix the type
 
    /// of subexpressions before they have a chance to call this function.
 
    fn insert_initial_expr_inference_type(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId
 
    ) -> Result<(), ParseError> {
 
        use ExpressionParent as EP;
 
        use InferenceTypePart as ITP;
 

	
 
        let expr = &ctx.heap[expr_id];
 
        let inference_type = match expr.parent() {
 
            EP::None =>
 
                // Should have been set by linker
 
                unreachable!(),
 
            EP::ExpressionStmt(_) =>
 
                // Determined during type inference
 
                InferenceType::new(false, false, vec![ITP::Unknown]),
 
            EP::Expression(parent_id, idx_in_parent) => {
 
                // If we are the test expression of a conditional expression,
 
                // then we must resolve to a boolean
 
                let is_conditional = if let Expression::Conditional(_) = &ctx.heap[*parent_id] {
 
                    true
 
                } else {
 
                    false
 
                };
 

	
 
                if is_conditional && *idx_in_parent == 0 {
 
                    InferenceType::new(false, true, vec![ITP::Bool])
 
                } else {
 
                    InferenceType::new(false, false, vec![ITP::Unknown])
 
                }
 
            },
 
            EP::If(_) | EP::While(_) =>
 
                // Must be a boolean
 
                InferenceType::new(false, true, vec![ITP::Bool]),
 
            EP::Return(_) =>
 
                // Must match the return type of the function
 
                if let DefinitionType::Function(func_id) = self.definition_type {
 
                    debug_assert_eq!(ctx.heap[func_id].return_types.len(), 1);
 
                    let returned = &ctx.heap[func_id].return_types[0];
 
                    self.determine_inference_type_from_parser_type_elements(&returned.elements, true)
 
                } else {
 
                    // Cannot happen: definition always set upon body traversal
 
                    // and "return" calls in components are illegal.
 
                    unreachable!();
 
                },
 
            EP::New(_) =>
 
                // Must be a component call, which we assign a "Void" return
 
                // type
 
                InferenceType::new(false, true, vec![ITP::Void]),
 
        };
 

	
 
        let infer_expr = &mut self.expr_types[expr.get_unique_id_in_definition() as usize];
 
        let needs_extra_data = match expr {
 
            Expression::Call(_) => true,
 
            Expression::Literal(expr) => match expr.value {
 
                Literal::Enum(_) | Literal::Union(_) | Literal::Struct(_) => true,
 
                _ => false,
 
            },
 
            Expression::Select(_) => true,
 
            _ => false,
 
        };
 

	
 
        if infer_expr.expr_id.is_invalid() {
 
            // Nothing is set yet
 
            infer_expr.expr_type = inference_type;
 
            infer_expr.expr_id = expr_id;
 
            if needs_extra_data {
 
                let extra_idx = self.extra_data.len() as i32;
 
                self.extra_data.push(ExtraData::default());
 
                infer_expr.extra_data_idx = extra_idx;
 
            }
 
        } else {
 
            // We already have an entry
 
            debug_assert!(false, "does this ever happen?");
 
            if let SingleInferenceResult::Incompatible = InferenceType::infer_subtree_for_single_type(
 
                &mut infer_expr.expr_type, 0, &inference_type.parts, 0, false
 
            ) {
 
                return Err(self.construct_expr_type_error(ctx, expr_id, expr_id));
 
            }
 

	
 
            debug_assert!((infer_expr.extra_data_idx != -1) == needs_extra_data);
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn insert_initial_call_polymorph_data(
 
        &mut self, ctx: &mut Ctx, call_id: CallExpressionId
 
    ) {
 
        // Note: the polymorph variables may be partially specified and may
 
        // contain references to the wrapping definition's (i.e. the proctype
 
        // we are currently visiting) polymorphic arguments.
 
        //
 
        // The arguments of the call may refer to polymorphic variables in the
 
        // definition of the function we're calling, not of the wrapping
 
        // definition. We insert markers in these inferred types to be able to
 
        // map them back and forth to the polymorphic arguments of the function
 
        // we are calling.
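        // Illustrative sketch (not part of the original source): for a
        // hypothetical polymorphic function `fetch<T>(port: in<T>) -> T[]`,
        // the types built below would look roughly like
        //   parameter: [Input, Marker(0), Unknown]
        //   returned:  [Array, Marker(0), Unknown]
        // so progress made on the argument's payload type can be mirrored
        // into the return type through poly var 0, and vice versa.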
 
        let call = &ctx.heap[call_id];
 
        let extra_data_idx = self.expr_types[call.unique_id_in_definition as usize].extra_data_idx; // TODO: @Temp
 
        debug_assert!(extra_data_idx != -1, "insert initial call polymorph data, no preallocated ExtraData");
 

	
 
        // Handle the polymorphic arguments (if there are any)
 
        let num_poly_args = call.parser_type.elements[0].variant.num_embedded();
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 
        for embedded_elements in call.parser_type.iter_embedded(0) {
 
            poly_args.push(self.determine_inference_type_from_parser_type_elements(embedded_elements, true));
 
        }
 

	
 
        // Handle the arguments and return types
 
        let definition = &ctx.heap[call.definition];
 
        let (parameters, returned) = match definition {
 
            Definition::Component(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, None)
 
            },
 
            Definition::Function(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, Some(&definition.return_types))
 
            },
 
            Definition::Struct(_) | Definition::Enum(_) | Definition::Union(_) => {
 
                unreachable!("insert_initial_call_polymorph data for non-procedure type");
 
            },
 
        };
 

	
 
        let mut parameter_types = Vec::with_capacity(parameters.len());
 
        for parameter_id in parameters.clone().into_iter() { // TODO: @Performance @Now
 
            let param = &ctx.heap[parameter_id];
 
            parameter_types.push(self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, false));
 
        }
 

	
 
        let return_type = match returned {
 
            None => {
 
                // Component, so returns a "Void"
 
                InferenceType::new(false, true, vec![InferenceTypePart::Void])
 
            },
 
            Some(returned) => {
 
                debug_assert_eq!(returned.len(), 1); // TODO: @ReturnTypes
 
                let returned = &returned[0];
 
                self.determine_inference_type_from_parser_type_elements(&returned.elements, false)
 
            }
 
        };
 

	
 
        self.extra_data[extra_data_idx as usize] = ExtraData{
 
            expr_id: call_id.upcast(),
 
            definition_id: call.definition,
 
            poly_vars: poly_args,
 
            embedded: parameter_types,
 
            returned: return_type
 
        };
 
    }
 

	
 
    fn insert_initial_struct_polymorph_data(
 
        &mut self, ctx: &mut Ctx, lit_id: LiteralExpressionId,
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = &ctx.heap[lit_id];
 
        let extra_data_idx = self.expr_types[literal.unique_id_in_definition as usize].extra_data_idx; // TODO: @Temp
 
        debug_assert!(extra_data_idx != -1, "initial struct polymorph data, but no preallocated ExtraData");
 
        let literal = ctx.heap[lit_id].value.as_struct();
 

	
 
        // Handle polymorphic arguments
 
        let num_embedded = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_embedded);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle parser types on struct definition
 
        let defined_type = ctx.types.get_base_definition(&literal.definition).unwrap();
 
        let struct_type = defined_type.definition.as_struct();
 
        debug_assert_eq!(poly_args.len(), defined_type.poly_vars.len());
 

	
 
        // Note: programmer is capable of specifying fields in a struct literal
 
        // in a different order than on the definition. We take the literal-
 
        // specified order to be leading.
 
        let mut embedded_types = Vec::with_capacity(struct_type.fields.len());
 
        for lit_field in literal.fields.iter() {
 
            let def_field = &struct_type.fields[lit_field.field_idx];
 
            let inference_type = self.determine_inference_type_from_parser_type_elements(&def_field.parser_type.elements, false);
 
            embedded_types.push(inference_type);
 
        }
 

	
 
        // Return type is the struct type itself, with the appropriate 
 
        // polymorphic variables. So:
 
        // - 1 part for definition
 
        // - N_poly_arg marker parts for each polymorphic argument
 
        // - all the parts for the currently known polymorphic arguments 
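        // Worked example (illustrative, not part of the original source): for
        // a literal of a hypothetical `struct Pair<A, B>` where A was written
        // as `u8` and B was left to be inferred, the poly_args are [UInt8] and
        // [Unknown], so parts_reserved = 1 + 2 + 2 = 5 and the loop below
        // builds [Instance(Pair, 2), Marker(0), UInt8, Marker(1), Unknown],
        // with return_type_done ending up false because B is not known yet.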
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len() as u32));
 
        let mut return_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { return_type_done = false; }
 

	
 
            parts.push(ITP::Marker(poly_var_idx as u32));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let return_type = InferenceType::new(!poly_args.is_empty(), return_type_done, parts);
 

	
 
        self.extra_data[extra_data_idx as usize] = ExtraData{
 
            expr_id: lit_id.upcast(),
 
            definition_id: literal.definition,
 
            poly_vars: poly_args,
 
            embedded: embedded_types,
 
            returned: return_type,
 
        };
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for enum expressions. These
 
    /// can never be determined from the enum itself, but may be inferred from
 
    /// the use of the enum.
 
    fn insert_initial_enum_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = &ctx.heap[lit_id];
 
        let extra_data_idx = self.expr_types[literal.unique_id_in_definition as usize].extra_data_idx; // TODO: @Temp
 
        debug_assert!(extra_data_idx != -1, "initial enum polymorph data, but no preallocated ExtraData");
 
        let literal = ctx.heap[lit_id].value.as_enum();
 

	
 
        // Handle polymorphic arguments to the enum
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle enum type itself
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len() as u32));
 
        let mut enum_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { enum_type_done = false; }
 

	
 
            parts.push(ITP::Marker(poly_var_idx as u32));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let enum_type = InferenceType::new(!poly_args.is_empty(), enum_type_done, parts);
 

	
 
        self.extra_data[extra_data_idx as usize] = ExtraData{
 
            expr_id: lit_id.upcast(),
 
            definition_id: literal.definition,
 
            poly_vars: poly_args,
 
            embedded: Vec::new(),
 
            returned: enum_type,
 
        };
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for unions. The polymorphic
 
    /// arguments may be partially determined from embedded values in the union.
 
    fn insert_initial_union_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = &ctx.heap[lit_id];
 
        let extra_data_idx = self.expr_types[literal.unique_id_in_definition as usize].extra_data_idx; // TODO: @Temp
 
        debug_assert!(extra_data_idx != -1, "initial union polymorph data, but no preallocated ExtraData");
 
        let literal = ctx.heap[lit_id].value.as_union();
 

	
 
        // Construct the polymorphic variables
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle any of the embedded values in the variant, if specified
 
        let definition_id = literal.definition;
 
        let type_definition = ctx.types.get_base_definition(&definition_id).unwrap();
 
        let union_definition = type_definition.definition.as_union();
 
        debug_assert_eq!(poly_args.len(), type_definition.poly_vars.len());
 

	
 
        let variant_definition = &union_definition.variants[literal.variant_idx];
 
        debug_assert_eq!(variant_definition.embedded.len(), literal.values.len());
 

	
 
        let mut embedded = Vec::with_capacity(variant_definition.embedded.len());
 
        for embedded_parser_type in &variant_definition.embedded {
 
            let inference_type = self.determine_inference_type_from_parser_type_elements(&embedded_parser_type.elements, false);
 
            embedded.push(inference_type);
 
        }
 

	
 
        // Handle the type of the union itself
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(definition_id, poly_args.len() as u32));
 
        let mut union_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { union_type_done = false; }
 

	
 
            parts.push(ITP::Marker(poly_var_idx as u32));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts_reserved, parts.len());
 
        let union_type = InferenceType::new(!poly_args.is_empty(), union_type_done, parts);
 

	
 
        self.extra_data[extra_data_idx as usize] = ExtraData{
 
            expr_id: lit_id.upcast(),
 
            definition_id: literal.definition,
 
            poly_vars: poly_args,
 
            embedded,
 
            returned: union_type
 
        };
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct. Assumes that the select
 
    /// expression's referenced (definition_id, field_idx) has been resolved.
 
    fn insert_initial_select_polymorph_data(
 
        &mut self, ctx: &Ctx, select_id: SelectExpressionId, struct_def_id: DefinitionId
 
    ) {
 
        use InferenceTypePart as ITP;
 

	
 
        // Retrieve relevant data
 
        let expr = &ctx.heap[select_id];
 
        let expr_type = &self.expr_types[expr.unique_id_in_definition as usize];
 
        let field_idx = expr_type.field_or_monomorph_idx as usize;
 
        let extra_data_idx = expr_type.extra_data_idx; // TODO: @Temp
 
        debug_assert!(extra_data_idx != -1, "initial select polymorph data, but no preallocated ExtraData");
 

	
 
        let definition = ctx.heap[struct_def_id].as_struct();
 

	
 
        // Generate initial polyvar types and struct type
 
        // TODO: @Performance: we can immediately set the polyvars of the subject's struct type
 
        let num_poly_vars = definition.poly_vars.len();
 
        let mut poly_vars = Vec::with_capacity(num_poly_vars);
 
        let struct_parts_reserved = 1 + 2 * num_poly_vars;
 
        let mut struct_parts = Vec::with_capacity(struct_parts_reserved);
 
        struct_parts.push(ITP::Instance(struct_def_id, num_poly_vars as u32));
 

	
 
        for poly_idx in 0..num_poly_vars {
 
            poly_vars.push(InferenceType::new(true, false, vec![
 
                ITP::Marker(poly_idx as u32), ITP::Unknown,
 
            ]));
 
            struct_parts.push(ITP::Marker(poly_idx as u32));
 
            struct_parts.push(ITP::Unknown);
 
        }
 
        debug_assert_eq!(struct_parts.len(), struct_parts_reserved);
 

	
 
        // Generate initial field type
 
        let field_type = self.determine_inference_type_from_parser_type_elements(&definition.fields[field_idx].parser_type.elements, false);
 
        self.extra_data[extra_data_idx as usize] = ExtraData{
 
            expr_id: select_id.upcast(),
 
            definition_id: struct_def_id,
 
            poly_vars,
 
            embedded: vec![InferenceType::new(num_poly_vars != 0, num_poly_vars == 0, struct_parts)],
 
            returned: field_type
 
        };
 
    }
 

	
 
    /// Determines the initial InferenceType from the provided ParserType. This
 
    /// may be called with two kinds of intentions:
 
    /// 1. To resolve a ParserType within the body of a function, or on
 
    ///     polymorphic arguments to calls/instantiations within that body. This
 
    ///     means that the polymorphic variables are known and can be replaced
 
    ///     with the monomorph we're instantiating.
 
    /// 2. To resolve a ParserType on a called function's definition or on
 
    ///     an instantiated datatype's members. This means that the polymorphic
 
    ///     arguments inside those ParserTypes refer to the polymorphic
 
    ///     variables in the called/instantiated type's definition.
 
    /// In the second case we place InferenceTypePart::Marker instances such
 
    /// that we can perform type inference on the polymorphic variables.
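    // Illustrative sketch (not part of the original source): suppose the body
    // of `outer<T>` contains a call `inner<u8[]>(x)` to a hypothetical
    // `inner<S>(arg: S) -> S`. Resolving the call site's polymorphic argument
    // is case 1: had it referenced T, the known monomorph value of T would be
    // substituted directly. Resolving `inner`'s own signature `arg: S` is
    // case 2: S becomes [Marker(0), Unknown] so it can still be inferred.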
 
    fn determine_inference_type_from_parser_type_elements(
 
        &mut self, elements: &[ParserTypeElement],
 
        use_definitions_known_poly_args: bool
 
    ) -> InferenceType {
 
        use ParserTypeVariant as PTV;
 
        use InferenceTypePart as ITP;
 

	
 
        let mut infer_type = Vec::with_capacity(elements.len());
 
        let mut has_inferred = false;
 
        let mut has_markers = false;
 

	
 
        for element in elements {
 
            match &element.variant {
 
                // Compiler-only types
 
                PTV::Void => { infer_type.push(ITP::Void); },
 
                PTV::InputOrOutput => { infer_type.push(ITP::PortLike); has_inferred = true },
 
                PTV::ArrayLike => { infer_type.push(ITP::ArrayLike); has_inferred = true },
 
                PTV::IntegerLike => { infer_type.push(ITP::IntegerLike); has_inferred = true },
 
                // Builtins
 
                PTV::Message => {
 
                    // TODO: @types Remove the Message -> Byte hack at some point...
 
                    infer_type.push(ITP::Message);
 
                    infer_type.push(ITP::UInt8);
 
                },
 
                PTV::Bool => { infer_type.push(ITP::Bool); },
 
                PTV::UInt8 => { infer_type.push(ITP::UInt8); },
 
                PTV::UInt16 => { infer_type.push(ITP::UInt16); },
 
                PTV::UInt32 => { infer_type.push(ITP::UInt32); },
 
                PTV::UInt64 => { infer_type.push(ITP::UInt64); },
 
                PTV::SInt8 => { infer_type.push(ITP::SInt8); },
 
                PTV::SInt16 => { infer_type.push(ITP::SInt16); },
 
                PTV::SInt32 => { infer_type.push(ITP::SInt32); },
 
                PTV::SInt64 => { infer_type.push(ITP::SInt64); },
 
                PTV::Character => { infer_type.push(ITP::Character); },
 
                PTV::String => {
 
                    infer_type.push(ITP::String);
 
                    infer_type.push(ITP::Character);
 
                },
 
                // Special markers
 
                PTV::IntegerLiteral => { unreachable!("integer literal type on variable type"); },
 
                PTV::Inferred => {
 
                    infer_type.push(ITP::Unknown);
 
                    has_inferred = true;
 
                },
 
                // With nested types
 
                PTV::Array => { infer_type.push(ITP::Array); },
 
                PTV::Input => { infer_type.push(ITP::Input); },
 
                PTV::Output => { infer_type.push(ITP::Output); },
 
                PTV::PolymorphicArgument(belongs_to_definition, poly_arg_idx) => {
 
                    let poly_arg_idx = *poly_arg_idx;
 
                    if use_definitions_known_poly_args {
 
                        // Refers to polymorphic argument on procedure we're currently processing.
 
                        // This argument is already known.
 
                        debug_assert_eq!(*belongs_to_definition, self.definition_type.definition_id());
 
                        debug_assert!((poly_arg_idx as usize) < self.poly_vars.len());
 

	
 
                        Self::determine_inference_type_from_concrete_type(
 
                            &mut infer_type, &self.poly_vars[poly_arg_idx as usize].parts
 
                        );
 
                    } else {
 
                        // Polymorphic argument has to be inferred
 
                        has_markers = true;
 
                        has_inferred = true;
 
                        infer_type.push(ITP::Marker(poly_arg_idx));
 
                        infer_type.push(ITP::Unknown)
 
                    }
 
                },
 
                PTV::Definition(definition_id, num_embedded) => {
 
                    infer_type.push(ITP::Instance(*definition_id, *num_embedded));
 
                }
 
            }
 
        }
 

	
 
        InferenceType::new(has_markers, !has_inferred, infer_type)
 
    }
 

	
 
    /// Determines the inference type from an already concrete type. Applies the
 
    /// various type "hacks" inside the type inferencer.
 
    fn determine_inference_type_from_concrete_type(parser_type: &mut Vec<InferenceTypePart>, concrete_type: &[ConcreteTypePart]) {
 
        use InferenceTypePart as ITP;
 
        use ConcreteTypePart as CTP;
 

	
 
        for concrete_part in concrete_type {
 
            match concrete_part {
 
                CTP::Void => parser_type.push(ITP::Void),
 
                // Same Message -> Byte hack as in
                // determine_inference_type_from_parser_type_elements above.
                CTP::Message => {
 
                    parser_type.push(ITP::Message);
 
                    parser_type.push(ITP::UInt8)
 
                },
 
                CTP::Bool => parser_type.push(ITP::Bool),
 
                CTP::UInt8 => parser_type.push(ITP::UInt8),
 
                CTP::UInt16 => parser_type.push(ITP::UInt16),
 
                CTP::UInt32 => parser_type.push(ITP::UInt32),
 
                CTP::UInt64 => parser_type.push(ITP::UInt64),
 
                CTP::SInt8 => parser_type.push(ITP::SInt8),
 
                CTP::SInt16 => parser_type.push(ITP::SInt16),
 
                CTP::SInt32 => parser_type.push(ITP::SInt32),
 
                CTP::SInt64 => parser_type.push(ITP::SInt64),
 
                CTP::Character => parser_type.push(ITP::Character),
 
                CTP::String => {
 
                    parser_type.push(ITP::String);
 
                    parser_type.push(ITP::Character)
 
                },
 
                CTP::Array => parser_type.push(ITP::Array),
 
                CTP::Slice => parser_type.push(ITP::Slice),
 
                CTP::Input => parser_type.push(ITP::Input),
 
                CTP::Output => parser_type.push(ITP::Output),
 
                CTP::Instance(id, num) => parser_type.push(ITP::Instance(*id, *num)),
 
                CTP::Function(_, _) => unreachable!("function type during concrete to inference type conversion"),
 
                CTP::Component(_, _) => unreachable!("component type during concrete to inference type conversion"),
 
            }
 
        }
 
    }
 

	
 
    /// Construct an error when an expression's type does not match. This
 
    /// happens if we infer the expression type from its arguments (e.g. the
 
    /// expression type of an addition operator is the type of its arguments),

    /// but the expression type was already set by our parent (e.g. an

    /// "if statement" or a "logical not" always expects a boolean).
 
    fn construct_expr_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, arg_id: ExpressionId
 
    ) -> ParseError {
 
        // TODO: Expand and provide more meaningful information for humans
 
        let expr = &ctx.heap[expr_id];
 
        let arg_expr = &ctx.heap[arg_id];
 
        let expr_idx = expr.get_unique_id_in_definition();
 
        let arg_expr_idx = arg_expr.get_unique_id_in_definition();
 
        let expr_type = &self.expr_types[expr_idx as usize].expr_type;
 
        let arg_type = &self.expr_types[arg_expr_idx as usize].expr_type;
 

	
 
        return ParseError::new_error_at_span(
 
            &ctx.module().source, expr.operation_span(), format!(
 
                "incompatible types: this expression expected a '{}'",
 
                expr_type.display_name(&ctx.heap)
 
            )
 
        ).with_info_at_span(
 
            &ctx.module().source, arg_expr.full_span(), format!(
 
                "but this expression yields a '{}'",
 
                arg_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_arg_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId
 
    ) -> ParseError {
 
        let expr = &ctx.heap[expr_id];
 
        let arg1 = &ctx.heap[arg1_id];
 
        let arg2 = &ctx.heap[arg2_id];
 

	
 
        let arg1_idx = arg1.get_unique_id_in_definition();
 
        let arg1_type = &self.expr_types[arg1_idx as usize].expr_type;
 
        let arg2_idx = arg2.get_unique_id_in_definition();
 
        let arg2_type = &self.expr_types[arg2_idx as usize].expr_type;
 

	
 
        return ParseError::new_error_str_at_span(
 
            &ctx.module().source, expr.operation_span(),
 
            "incompatible types: cannot apply this expression"
 
        ).with_info_at_span(
 
            &ctx.module().source, arg1.full_span(), format!(
 
                "Because this expression has type '{}'",
 
                arg1_type.display_name(&ctx.heap)
 
            )
 
        ).with_info_at_span(
 
            &ctx.module().source, arg2.full_span(), format!(
 
                "But this expression has type '{}'",
 
                arg2_type.display_name(&ctx.heap)
 
            )
 
        )
 
    }
 

	
 
    fn construct_template_type_error(
 
        &self, ctx: &Ctx, expr_id: ExpressionId, template: &[InferenceTypePart]
 
    ) -> ParseError {
 
        let expr = &ctx.heap[expr_id];
 
        let expr_idx = expr.get_unique_id_in_definition();
 
        let expr_type = &self.expr_types[expr_idx as usize].expr_type;
 

	
 
        return ParseError::new_error_at_span(
 
            &ctx.module().source, expr.full_span(), format!(
 
                "incompatible types: got a '{}' but expected a '{}'",
 
                expr_type.display_name(&ctx.heap), 
 
                InferenceType::partial_display_name(&ctx.heap, template)
 
            )
 
        )
 
    }
 

	
 
    /// Constructs a human interpretable error in the case that type inference
 
    /// on a polymorphic variable of a function call or literal construction 
 
    /// failed. This may only be caused by a pair of inference types (which may 
 
    /// come from arguments or the return type) having two different inferred 
 
    /// values for that polymorphic variable.
 
    ///
 
    /// So we find this pair and construct the error using it.
 
    ///
 
    /// We assume that the expression is a function call or a struct literal,
 
    /// and that an actual error has occurred.
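    // Illustrative sketch (not part of the original source): for a call to a
    // hypothetical `same<T>(a: T, b: T)` with a `u8` and a `u32` argument,
    // both embedded argument types carry a Marker(0) section, one inferred to
    // UInt8 and one to UInt32. check_subtrees rejects that pair, so
    // has_poly_mismatch reports poly var 0 together with the two conflicting
    // sections, which are then formatted into the error below.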
 
    fn construct_poly_arg_error(
 
        ctx: &Ctx, poly_data: &ExtraData, expr_id: ExpressionId
 
    ) -> ParseError {
 
        // Helper function to check for polymorph mismatch between two inference
 
        // types.
 
        fn has_poly_mismatch<'a>(type_a: &'a InferenceType, type_b: &'a InferenceType) -> Option<(u32, &'a [InferenceTypePart], &'a [InferenceTypePart])> {
 
            if !type_a.has_marker || !type_b.has_marker {
 
                return None
 
            }
 

	
 
            for (marker_a, section_a) in type_a.marker_iter() {
 
                for (marker_b, section_b) in type_b.marker_iter() {
 
                    if marker_a != marker_b {
 
                        // Not the same polymorphic variable
 
                        continue;
 
                    }
 

	
 
                    if !InferenceType::check_subtrees(section_a, 0, section_b, 0) {
 
                        // Not compatible
 
                        return Some((marker_a, section_a, section_b))
 
                    }
 
                }
 
            }
 

	
 
            None
 
        }
 

	
 
        // Helper function to check for polymorph mismatch between an inference
 
        // type and the polymorphic variables in the poly_data struct.
 
        fn has_explicit_poly_mismatch<'a>(
 
            poly_vars: &'a [InferenceType], arg: &'a InferenceType
 
        ) -> Option<(u32, &'a [InferenceTypePart], &'a [InferenceTypePart])> {
 
            for (marker, section) in arg.marker_iter() {
 
                debug_assert!((marker as usize) < poly_vars.len());
 
                let poly_section = &poly_vars[marker as usize].parts;
 
                if !InferenceType::check_subtrees(poly_section, 0, section, 0) {
 
                    return Some((marker, poly_section, section))
 
                }
 
            }
 

	
 
            None
 
        }
 

	
 
        // Helper function to retrieve polyvar name and definition name
 
        fn get_poly_var_and_definition_name<'a>(ctx: &'a Ctx, poly_var_idx: u32, definition_id: DefinitionId) -> (&'a str, &'a str) {
 
            let definition = &ctx.heap[definition_id];
 
            let poly_var = definition.poly_vars()[poly_var_idx as usize].value.as_str();
 
            let func_name = definition.identifier().value.as_str();
 

	
 
            (poly_var, func_name)
 
        }
 

	
 
        // Helper function to construct initial error
 
        fn construct_main_error(ctx: &Ctx, poly_data: &ExtraData, poly_var_idx: u32, expr: &Expression) -> ParseError {
 
            match expr {
 
                Expression::Call(expr) => {
 
                    let (poly_var, func_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id);
 
                    return ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.func_span, format!(
 
                            "Conflicting type for polymorphic variable '{}' of '{}'",
 
                            poly_var, func_name
 
                        )
 
                    )
 
                },
 
                Expression::Literal(expr) => {
 
                    let (poly_var, type_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id);
 
                    return ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.span, format!(
 
                            "Conflicting type for polymorphic variable '{}' of instantiation of '{}'",
 
                            poly_var, type_name
 
                        )
 
                    );
 
                },
 
                Expression::Select(expr) => {
 
                    let (poly_var, struct_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id);
 
                    return ParseError::new_error_at_span(
 
                        &ctx.module().source, expr.full_span, format!(
 
                            "Conflicting type for polymorphic variable '{}' while accessing field '{}' of '{}'",
 
                            poly_var, expr.field_name.value.as_str(), struct_name
 
                        )
 
                    )
 
                }
 
                _ => unreachable!("called construct_poly_arg_error without an expected expression, got: {:?}", expr)
 
            }
 
        }
 

	
 
        // Actual checking
 
        let expr = &ctx.heap[expr_id];
 
        let (expr_args, expr_return_name) = match expr {
 
            Expression::Call(expr) => 
 
                (
 
                    expr.arguments.clone(),
 
                    "return type"
 
                ),
 
            Expression::Literal(expr) => {
 
                let expressions = match &expr.value {
 
                    Literal::Struct(v) => v.fields.iter()
 
                        .map(|f| f.value)
 
                        .collect(),
 
                    Literal::Enum(_) => Vec::new(),
 
                    Literal::Union(v) => v.values.clone(),
 
                    _ => unreachable!()
 
                };
 

	
 
                ( expressions, "literal" )
 
            },
 
            Expression::Select(expr) =>
 
                // Select expression uses the polymorphic variables of the 
 
                // struct it is accessing, so get the subject expression.
 
                (
 
                    vec![expr.subject],
 
                    "selected field"
 
                ),
 
            _ => unreachable!(),
 
        };
 

	
 
        // - check return type with itself
 
        if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(
 
            &poly_data.returned, &poly_data.returned
 
        ) {
 
            return construct_main_error(ctx, poly_data, poly_idx, expr)
 
                .with_info_at_span(
 
                    &ctx.module().source, expr.full_span(), format!(
 
                        "The {} inferred the conflicting types '{}' and '{}'",
 
                        expr_return_name,
 
                        InferenceType::partial_display_name(&ctx.heap, section_a),
 
                        InferenceType::partial_display_name(&ctx.heap, section_b)
 
                    )
 
                );
 
        }
 

	
 
        // - check arguments with each other argument and with return type
 
        for (arg_a_idx, arg_a) in poly_data.embedded.iter().enumerate() {
 
            for (arg_b_idx, arg_b) in poly_data.embedded.iter().enumerate() {
 
                if arg_b_idx > arg_a_idx {
 
                    break;
 
                }
 

	
 
                if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&arg_a, &arg_b) {
 
                    let error = construct_main_error(ctx, poly_data, poly_idx, expr);
 
                    if arg_a_idx == arg_b_idx {
 
                        // Same argument
 
                        let arg = &ctx.heap[expr_args[arg_a_idx]];
 
                        return error.with_info_at_span(
 
                            &ctx.module().source, arg.full_span(), format!(
 
                                "This argument inferred the conflicting types '{}' and '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a),
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        );
 
                    } else {
 
                        let arg_a = &ctx.heap[expr_args[arg_a_idx]];
 
                        let arg_b = &ctx.heap[expr_args[arg_b_idx]];
 
                        return error.with_info_at_span(
 
                            &ctx.module().source, arg_a.full_span(), format!(
 
                                "This argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_a)
 
                            )
 
                        ).with_info_at_span(
 
                            &ctx.module().source, arg_b.full_span(), format!(
 
                                "While this argument inferred it to '{}'",
 
                                InferenceType::partial_display_name(&ctx.heap, section_b)
 
                            )
 
                        )
 
                    }
 
                }
 
            }
 

	
 
            // Check with return type
 
            if let Some((poly_idx, section_arg, section_ret)) = has_poly_mismatch(arg_a, &poly_data.returned) {
 
                let arg = &ctx.heap[expr_args[arg_a_idx]];
 
                return construct_main_error(ctx, poly_data, poly_idx, expr)
 
                    .with_info_at_span(
 
                        &ctx.module().source, arg.full_span(), format!(
 
                            "This argument inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, section_arg)
 
                        )
 
                    )
 
                    .with_info_at_span(
 
                        &ctx.module().source, expr.full_span(), format!(
 
                            "While the {} inferred it to '{}'",
 
                            expr_return_name,
 
                            InferenceType::partial_display_name(&ctx.heap, section_ret)
 
                        )
 
                    );
 
            }
 
        }
 

	
 
        // Now check against the explicitly specified polymorphic variables (if
 
        // any).
 
        for (arg_idx, arg) in poly_data.embedded.iter().enumerate() {
 
            if let Some((poly_idx, poly_section, arg_section)) = has_explicit_poly_mismatch(&poly_data.poly_vars, arg) {
 
                let arg = &ctx.heap[expr_args[arg_idx]];
 
                return construct_main_error(ctx, poly_data, poly_idx, expr)
 
                    .with_info_at_span(
 
                        &ctx.module().source, arg.full_span(), format!(
 
                            "The polymorphic variable has type '{}' (which might have been partially inferred) while the argument inferred it to '{}'",
 
                            InferenceType::partial_display_name(&ctx.heap, poly_section),
 
                            InferenceType::partial_display_name(&ctx.heap, arg_section)
 
                        )
 
                    );
 
            }
 
        }
 

	
 
        if let Some((poly_idx, poly_section, ret_section)) = has_explicit_poly_mismatch(&poly_data.poly_vars, &poly_data.returned) {
 
            return construct_main_error(ctx, poly_data, poly_idx, expr)
 
                .with_info_at_span(
 
                    &ctx.module().source, expr.full_span(), format!(
 
                        "The polymorphic variable has type '{}' (which might have been partially inferred) while the {} inferred it to '{}'",
 
                        InferenceType::partial_display_name(&ctx.heap, poly_section),
 
                        expr_return_name,
 
                        InferenceType::partial_display_name(&ctx.heap, ret_section)
 
                    )
 
                )
 
        }
 

	
 
        unreachable!("construct_poly_arg_error without actual error found?")
 
    }
 
}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 
    use crate::protocol::arena::Id;
 
    use InferenceTypePart as ITP;
 
    use InferenceType as IT;
 

	
 
    #[test]
 
    fn test_single_part_inference() {
 
        // lhs argument inferred from rhs
 
        let pairs = [
 
            (ITP::NumberLike, ITP::UInt8),
 
            (ITP::IntegerLike, ITP::SInt32),
 
            (ITP::Unknown, ITP::UInt64),
 
            (ITP::Unknown, ITP::Bool)
 
        ];
 
        for (lhs, rhs) in pairs.iter() {
 
            // Using infer-both
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let mut rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            // Using infer-single
 
            let mut lhs_type = IT::new(false, false, vec![lhs.clone()]);
 
            let rhs_type = IT::new(false, true, vec![rhs.clone()]);
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0, false
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 
        }
 
    }
 

	
 
    #[test]
 
    fn test_multi_part_inference() {
 
        let pairs = [
 
            (vec![ITP::ArrayLike, ITP::NumberLike], vec![ITP::Slice, ITP::SInt8]),
 
            (vec![ITP::Unknown], vec![ITP::Input, ITP::Array, ITP::String, ITP::Character]),
 
            (vec![ITP::PortLike, ITP::SInt32], vec![ITP::Input, ITP::SInt32]),
 
            (vec![ITP::Unknown], vec![ITP::Output, ITP::SInt32]),
 
            (
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Unknown, ITP::Output, ITP::Unknown],
 
                vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Array, ITP::SInt32, ITP::Output, ITP::SInt32]
 
            )
 
        ];
 

	
 
        for (lhs, rhs) in pairs.iter() {
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let mut rhs_type = IT::new(false, true, rhs.clone());
 
            let result = unsafe{ IT::infer_subtrees_for_both_types(
 
                &mut lhs_type, 0, &mut rhs_type, 0
 
            ) };
 
            assert_eq!(DualInferenceResult::First, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 

	
 
            let mut lhs_type = IT::new(false, false, lhs.clone());
 
            let rhs_type = IT::new(false, true, rhs.clone());
 
            let result = IT::infer_subtree_for_single_type(
 
                &mut lhs_type, 0, &rhs_type.parts, 0, false
 
            );
 
            assert_eq!(SingleInferenceResult::Modified, result);
 
            assert_eq!(lhs_type.parts, rhs_type.parts);
 
        }
 
    }
 
}
 
\ No newline at end of file
src/protocol/parser/type_table.rs
Show inline comments
 
@@ -162,1537 +162,1537 @@ impl DefinedType {
 
                for (monomorph_idx, monomorph) in definition.monomorphs.iter().enumerate() {
 
                    if concrete_types_match(&monomorph.concrete_type, concrete_type, true) {
 
                        return Some(monomorph_idx);
 
                    }
 
                }
 
            },
 
            DTV::Struct(definition) => {
 
                for (monomorph_idx, monomorph) in definition.monomorphs.iter().enumerate() {
 
                    if concrete_types_match(&monomorph.concrete_type, concrete_type, true) {
 
                        return Some(monomorph_idx);
 
                    }
 
                }
 
            },
 
            DTV::Function(definition) => {
 
                for (monomorph_idx, monomorph) in definition.monomorphs.iter().enumerate() {
 
                    if concrete_types_match(&monomorph.concrete_type, concrete_type, false) {
 
                        return Some(monomorph_idx);
 
                    }
 
                }
 
            }
 
            DTV::Component(definition) => {
 
                for (monomorph_idx, monomorph) in definition.monomorphs.iter().enumerate() {
 
                    if concrete_types_match(&monomorph.concrete_type, concrete_type, false) {
 
                        return Some(monomorph_idx);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Nothing matched
 
        return None;
 
    }
 

	
 
    /// Retrieves size and alignment of the particular type's monomorph if it
 
    /// has been laid out in memory.
 
    pub(crate) fn get_monomorph_size_alignment(&self, idx: usize) -> Option<(usize, usize)> {
 
        use DefinedTypeVariant as DTV;
 
        let (size, alignment) = match &self.definition {
 
            DTV::Enum(def) => {
 
                debug_assert!(idx == 0);
 
                (def.size, def.alignment)
 
            },
 
            DTV::Union(def) => {
 
                let monomorph = &def.monomorphs[idx];
 
                (monomorph.stack_size, monomorph.stack_alignment)
 
            },
 
            DTV::Struct(def) => {
 
                let monomorph = &def.monomorphs[idx];
 
                (monomorph.size, monomorph.alignment)
 
            },
 
            DTV::Function(_) | DTV::Component(_) => {
 
                // Type table should never be able to arrive here during layout
 
                // of types. Types may only contain function prototypes.
 
                unreachable!("retrieving size and alignment of procedure type");
 
            }
 
        };
 

	
 
        if size == 0 && alignment == 0 {
 
            // The "marker" for when the type has not been layed out yet. Even
 
            // for zero-size types we will set alignment to `1` to simplify
 
            // alignment calculations.
 
            return None;
 
        } else {
 
            return Some((size, alignment));
 
        }
 
    }
 
}
 

	
 
pub enum DefinedTypeVariant {
 
    Enum(EnumType),
 
    Union(UnionType),
 
    Struct(StructType),
 
    Function(FunctionType),
 
    Component(ComponentType)
 
}
 

	
 
impl DefinedTypeVariant {
 
    pub(crate) fn type_class(&self) -> TypeClass {
 
        match self {
 
            DefinedTypeVariant::Enum(_) => TypeClass::Enum,
 
            DefinedTypeVariant::Union(_) => TypeClass::Union,
 
            DefinedTypeVariant::Struct(_) => TypeClass::Struct,
 
            DefinedTypeVariant::Function(_) => TypeClass::Function,
 
            DefinedTypeVariant::Component(_) => TypeClass::Component
 
        }
 
    }
 

	
 
    pub(crate) fn as_struct(&self) -> &StructType {
 
        match self {
 
            DefinedTypeVariant::Struct(v) => v,
 
            _ => unreachable!("Cannot convert {} to struct variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn as_struct_mut(&mut self) -> &mut StructType {
 
        match self {
 
            DefinedTypeVariant::Struct(v) => v,
 
            _ => unreachable!("Cannot convert {} to struct variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn as_enum(&self) -> &EnumType {
 
        match self {
 
            DefinedTypeVariant::Enum(v) => v,
 
            _ => unreachable!("Cannot convert {} to enum variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn as_enum_mut(&mut self) -> &mut EnumType {
 
        match self {
 
            DefinedTypeVariant::Enum(v) => v,
 
            _ => unreachable!("Cannot convert {} to enum variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn as_union(&self) -> &UnionType {
 
        match self {
 
            DefinedTypeVariant::Union(v) => v,
 
            _ => unreachable!("Cannot convert {} to union variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn as_union_mut(&mut self) -> &mut UnionType {
 
        match self {
 
            DefinedTypeVariant::Union(v) => v,
 
            _ => unreachable!("Cannot convert {} to union variant", self.type_class())
 
        }
 
    }
 

	
 
    pub(crate) fn procedure_monomorphs(&self) -> &Vec<ProcedureMonomorph> {
 
        use DefinedTypeVariant::*;
 

	
 
        match self {
 
            Function(v) => &v.monomorphs,
 
            Component(v) => &v.monomorphs,
 
            _ => unreachable!("cannot get procedure monomorphs from {}", self.type_class()),
 
        }
 
    }
 

	
 
    pub(crate) fn procedure_monomorphs_mut(&mut self) -> &mut Vec<ProcedureMonomorph> {
 
        use DefinedTypeVariant::*;
 

	
 
        match self {
 
            Function(v) => &mut v.monomorphs,
 
            Component(v) => &mut v.monomorphs,
 
            _ => unreachable!("cannot get procedure monomorphs from {}", self.type_class()),
 
        }
 
    }
 
}
 

	
 
pub struct PolymorphicVariable {
 
    identifier: Identifier,
 
    is_in_use: bool, // a polymorphic argument may be defined, but not used by the type definition
 
}
 

	
 
/// Data associated with a monomorphized procedure type. The name is slightly
 
/// misleading, because it is also used to store expression data for a
 
/// non-polymorphic procedure (in that case there will only ever be one).
 
pub struct ProcedureMonomorph {
 
    // Expression data for one particular monomorph
 
    pub concrete_type: ConcreteType,
 
    pub expr_data: Vec<MonomorphExpression>,
 
}
 

	
 
/// `EnumType` is the classical C/C++ enum type. It has various variants with
 
/// an assigned integer value. The integer values may be user-defined,
 
/// compiler-defined, or a mix of the two. If a user assigns the same enum
 
/// value multiple times, we assume the user is an expert and we consider both
 
/// variants to be equal to one another.
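 
/// For example (hypothetical surface syntax): given `enum E { A = 1, B = 1 }`
 
/// the variants `A` and `B` are simply considered equal.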
 
pub struct EnumType {
 
    pub variants: Vec<EnumVariant>,
 
    pub monomorphs: Vec<EnumMonomorph>,
 
    pub minimum_tag_value: i64,
 
    pub maximum_tag_value: i64,
 
    pub tag_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
}
 

	
 
// TODO: Also support maximum u64 value
 
pub struct EnumVariant {
 
    pub identifier: Identifier,
 
    pub value: i64,
 
}
 

	
 
pub struct EnumMonomorph {
 
    pub concrete_type: ConcreteType,
 
}
 

	
 
/// `UnionType` is the algebraic datatype (or sum type, or discriminated union).
 
/// A value is an element of the union, identified by its tag, and may contain
 
/// a single subtype.
 
/// For potentially infinite types (e.g. a tree or a linked list) only unions
 
/// can break the infinite cycle. So when we lay out these unions in memory we
 
/// will reserve enough space on the stack for all union variants that do not
 
/// cause "type loops" (i.e. a union `A` with a variant containing a struct
 
/// `B`). And we will reserve enough space on the heap (and store a pointer in
 
/// the union) for all variants which do cause type loops (e.g. a union `A`
 
/// with a variant to a struct `B` that contains the union `A` again).
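 
/// As a rough sketch (hypothetical surface syntax): in a self-referential
 
/// `union List { Cons(u32, List), Nil }` the `Cons` variant would be marked as
 
/// living on the heap while `Nil` stays on the stack, which breaks the cycle.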
 
pub struct UnionType {
 
    pub variants: Vec<UnionVariant>,
 
    pub monomorphs: Vec<UnionMonomorph>,
 
    pub tag_type: ConcreteType,
 
    pub tag_size: usize,
 
}
 

	
 
pub struct UnionVariant {
 
    pub identifier: Identifier,
 
    pub embedded: Vec<ParserType>, // zero-length does not have embedded values
 
    pub tag_value: i64,
 
}
 

	
 
pub struct UnionMonomorph {
 
    pub concrete_type: ConcreteType,
 
    pub variants: Vec<UnionMonomorphVariant>,
 
    // stack_size is the size of the union on the stack, includes the tag
 
    pub stack_size: usize,
 
    pub stack_alignment: usize,
 
    // heap_size contains the allocated size of the union in the case it
 
    // is used to break a type loop. If it is 0, then it doesn't require
 
    // allocation and lives entirely on the stack.
 
    pub heap_size: usize,
 
    pub heap_alignment: usize,
 
}
 

	
 
pub struct UnionMonomorphVariant {
 
    pub lives_on_heap: bool,
 
    pub embedded: Vec<UnionMonomorphEmbedded>,
 
}
 

	
 
pub struct UnionMonomorphEmbedded {
 
    pub concrete_type: ConcreteType,
 
    // Note that the meaning of the offset (and alignment) depend on whether or
 
    // not the variant lives on the stack/heap. If it lives on the stack then
 
    // they refer to the offset from the start of the union value (so the first
 
    // embedded type lives at a non-zero offset, because the union tag sits in
 
    // the front). If it lives on the heap then it refers to the offset from the
 
    // allocated memory region (so the first embedded type lives at a 0 offset).
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// `StructType` is a generic C-like struct type (or record type, or product
 
/// type).
 
pub struct StructType {
 
    pub fields: Vec<StructField>,
 
    pub monomorphs: Vec<StructMonomorph>,
 
}
 

	
 
pub struct StructField {
 
    pub identifier: Identifier,
 
    pub parser_type: ParserType,
 
}
 

	
 
pub struct StructMonomorph {
 
    pub concrete_type: ConcreteType,
 
    pub fields: Vec<StructMonomorphField>,
 
    pub size: usize,
 
    pub alignment: usize,
 
}
 

	
 
pub struct StructMonomorphField {
 
    pub concrete_type: ConcreteType,
 
    pub size: usize,
 
    pub alignment: usize,
 
    pub offset: usize,
 
}
 

	
 
/// `FunctionType` is what you expect it to be: a particular function's
 
/// signature.
 
pub struct FunctionType {
 
    pub return_types: Vec<ParserType>,
 
    pub arguments: Vec<FunctionArgument>,
 
    pub monomorphs: Vec<ProcedureMonomorph>,
 
}
 

	
 
pub struct ComponentType {
 
    pub variant: ComponentVariant,
 
    pub arguments: Vec<FunctionArgument>,
 
    pub monomorphs: Vec<ProcedureMonomorph>
 
}
 

	
 
pub struct FunctionArgument {
 
    identifier: Identifier,
 
    parser_type: ParserType,
 
}
 

	
 
/// Represents the data associated with a single expression after type inference
 
/// for a monomorph (or just the normal expression types, if dealing with a
 
/// non-polymorphic function/component).
 
pub struct MonomorphExpression {
 
    // The output type of the expression. Note that for a function it is not the
 
    // function's signature but its return type
 
    pub(crate) expr_type: ConcreteType,
 
    // Has multiple meanings: the field index for select expressions, the
 
    // monomorph index for polymorphic function calls or literals. Negative
 
    // values are never valid and only serve to catch programming errors.
 
    pub(crate) field_or_monomorph_idx: i32,
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
// Programmer note: keep this struct free of dynamically allocated memory
 
#[derive(Clone)]
 
struct TypeLoopBreadcrumb {
 
    definition_id: DefinitionId,
 
    monomorph_idx: usize,
 
    next_member: usize,
 
    next_embedded: usize, // for unions, the index into the variant's embedded types
 
}
 

	
 
#[derive(Clone)]
 
struct MemoryBreadcrumb {
 
    definition_id: DefinitionId,
 
    monomorph_idx: usize,
 
    next_member: usize,
 
    next_embedded: usize,
 
    first_size_alignment_idx: usize,
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum TypeLoopResult {
 
    TypeExists,
 
    PushBreadcrumb(DefinitionId, ConcreteType),
 
    TypeLoop(usize), // index into vec of breadcrumbs at which the type matched
 
}
 

	
 
enum MemoryLayoutResult {
 
    TypeExists(usize, usize), // (size, alignment)
 
    PushBreadcrumb(MemoryBreadcrumb),
 
}
 

	
 
// TODO: @Optimize, initial memory-unoptimized implementation
 
struct TypeLoopEntry {
 
    definition_id: DefinitionId,
 
    monomorph_idx: usize,
 
    is_union: bool,
 
}
 

	
 
struct TypeLoop {
 
    members: Vec<TypeLoopEntry>
 
}
 

	
 
pub struct TypeTable {
 
    /// Lookup from AST DefinitionId to a defined type. Considering possible
 
    /// polymorphs is done inside the `DefinedType` struct.
 
    lookup: HashMap<DefinitionId, DefinedType>,
 
    /// Breadcrumbs left behind while trying to find type loops. Also used to
 
    /// determine sizes of types when all type loops are detected.
 
    type_loop_breadcrumbs: Vec<TypeLoopBreadcrumb>,
 
    type_loops: Vec<TypeLoop>,
 
    /// Stores all encountered types during type loop detection. Used afterwards
 
    /// to iterate over all types in order to compute size/alignment.
 
    encountered_types: Vec<TypeLoopEntry>,
 
    /// Breadcrumbs and temporary storage during memory layout computation.
 
    memory_layout_breadcrumbs: Vec<MemoryBreadcrumb>,
 
    size_alignment_stack: Vec<(usize, usize)>,
 
}
 

	
 
impl TypeTable {
 
    /// Construct a new type table without any resolved types.
 
    pub(crate) fn new() -> Self {
 
        Self{ 
 
            lookup: HashMap::new(), 
 
            type_loop_breadcrumbs: Vec::with_capacity(32),
 
            type_loops: Vec::with_capacity(8),
 
            encountered_types: Vec::with_capacity(32),
 
            memory_layout_breadcrumbs: Vec::with_capacity(32),
 
            size_alignment_stack: Vec::with_capacity(64),
 
        }
 
    }
 

	
 
    /// Iterates over all defined types (polymorphic and non-polymorphic) and
 
    /// add their types in two passes. In the first pass we will just add the
 
    /// base types (we will not consider monomorphs, and we will not compute
 
    /// byte sizes). In the second pass we will compute byte sizes of
 
    /// non-polymorphic types, and potentially the monomorphs that are embedded
 
    /// in those types.
 
    pub(crate) fn build_base_types(&mut self, modules: &mut [Module], ctx: &mut PassCtx) -> Result<(), ParseError> {
 
        // Make sure we're allowed to cast root_id to index into ctx.modules
 
        debug_assert!(modules.iter().all(|m| m.phase >= ModuleCompilationPhase::DefinitionsParsed));
 
        debug_assert!(self.lookup.is_empty());
 

	
 
        if cfg!(debug_assertions) {
 
            for (index, module) in modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        }
 

	
 
        // Use context to guess hashmap size of the base types
 
        let reserve_size = ctx.heap.definitions.len();
 
        self.lookup.reserve(reserve_size);
 

	
 
        // Resolve all base types
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let definition = &ctx.heap[definition_id];
 

	
 
            match definition {
 
                Definition::Enum(_) => self.build_base_enum_definition(modules, ctx, definition_id)?,
 
                Definition::Union(_) => self.build_base_union_definition(modules, ctx, definition_id)?,
 
                Definition::Struct(_) => self.build_base_struct_definition(modules, ctx, definition_id)?,
 
                Definition::Function(_) => self.build_base_function_definition(modules, ctx, definition_id)?,
 
                Definition::Component(_) => self.build_base_component_definition(modules, ctx, definition_id)?,
 
            }
 
        }
 

	
 
        debug_assert_eq!(self.lookup.len(), reserve_size, "mismatch in reserved size of type table"); // NOTE: Temp fix for builtin functions
 
        for module in modules.iter_mut() {
 
            module.phase = ModuleCompilationPhase::TypesAddedToTable;
 
        }
 

	
 
        // Go through all types again, lay out all types that are not
 
        // polymorphic. This might cause us to lay out types that are monomorphs
 
        // of polymorphic types.
 
        for definition_idx in 0..ctx.heap.definitions.len() {
 
            let definition_id = ctx.heap.definitions.get_id(definition_idx);
 
            let poly_type = self.lookup.get(&definition_id).unwrap();
 

	
 
            // Here we explicitly want to instantiate types which have no
 
            // polymorphic arguments (even if it has phantom polymorphic
 
            // arguments) because otherwise the user will see very weird
 
            // error messages.
 
            if poly_type.definition.type_class().is_data_type() && poly_type.poly_vars.is_empty() && poly_type.num_monomorphs() == 0 {
 
                self.detect_and_resolve_type_loops_for(
 
                    modules, ctx.heap,
 
                    ConcreteType{
 
                        parts: vec![ConcreteTypePart::Instance(definition_id, 0)]
 
                    },
 
                )?;
 
                self.lay_out_memory_for_encountered_types(ctx.arch);
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    /// Retrieves base definition from type table. We must be able to retrieve
 
    /// it as we resolve all base types upon type table construction (for now).
 
    /// However, in the future we might do on-demand type resolving, so return
 
    /// an option anyway
 
    pub(crate) fn get_base_definition(&self, definition_id: &DefinitionId) -> Option<&DefinedType> {
 
        self.lookup.get(&definition_id)
 
    }
 

	
 
    /// Returns the index into the monomorph type array if the procedure type
 
    /// already has a (reserved) monomorph.
 
    pub(crate) fn get_procedure_monomorph_index(&self, definition_id: &DefinitionId, types: &ConcreteType) -> Option<i32> {
 
        let def = self.lookup.get(definition_id).unwrap();
 
        let monos = def.definition.procedure_monomorphs();
 
        return monos.iter()
 
            .position(|v| v.concrete_type == *types)
 
            .map(|v| v as i32);
 
    }
 

	
 
    /// Returns a mutable reference to a procedure's monomorph expression data.
 
    /// Used by typechecker to fill in previously reserved type information
 
    pub(crate) fn get_procedure_expression_data_mut(&mut self, definition_id: &DefinitionId, monomorph_idx: i32) -> &mut ProcedureMonomorph {
 
        debug_assert!(monomorph_idx >= 0);
 
        let def = self.lookup.get_mut(definition_id).unwrap();
 
        let monomorphs = def.definition.procedure_monomorphs_mut();
 
        return &mut monomorphs[monomorph_idx as usize];
 
    }
 

	
 
    pub(crate) fn get_procedure_expression_data(&self, definition_id: &DefinitionId, monomorph_idx: i32) -> &ProcedureMonomorph {
 
        debug_assert!(monomorph_idx >= 0);
 
        let def = self.lookup.get(definition_id).unwrap();
 
        let monomorphs = def.definition.procedure_monomorphs();
 
        return &monomorphs[monomorph_idx as usize];
 
    }
 

	
 
    /// Reserves space for a monomorph of a polymorphic procedure. The index
 
    /// will point into a (reserved) slot of the array of expression types. The
 
    /// monomorph may NOT exist yet (because the reservation implies that we're
 
    /// going to be performing typechecking on it, and we don't want to
 
    /// check the same monomorph twice)
 
    pub(crate) fn reserve_procedure_monomorph_index(&mut self, definition_id: &DefinitionId, concrete_type: ConcreteType) -> i32 {
 
        let def = self.lookup.get_mut(definition_id).unwrap();
 
        let mono_types = def.definition.procedure_monomorphs_mut();
 
        debug_assert!(def.is_polymorph == (concrete_type.parts.len() != 1));
 
        debug_assert!(!mono_types.iter().any(|v| v.concrete_type == concrete_type));
 

	
 
        let mono_idx = mono_types.len();
 
        mono_types.push(ProcedureMonomorph{
 
            concrete_type,
 
            expr_data: Vec::new(),
 
        });
 

	
 
        return mono_idx as i32;
 
    }
 

	
 
    /// Adds a datatype polymorph to the type table. Will not add the
 
    /// monomorph if it is already present, or if the type's polymorphic
 
    /// variables are all unused.
 
    /// TODO: Fix signature
 
    pub(crate) fn add_data_monomorph(
 
        &mut self, modules: &[Module], heap: &Heap, arch: &TargetArch, definition_id: DefinitionId, concrete_type: ConcreteType
 
    ) -> Result<i32, ParseError> {
 
        debug_assert_eq!(definition_id, get_concrete_type_definition(&concrete_type));
 

	
 
        // Check if the monomorph already exists
 
        let poly_type = self.lookup.get_mut(&definition_id).unwrap();
 
        if let Some(idx) = poly_type.get_monomorph_index(&concrete_type) {
 
            return Ok(idx as i32);
 
        }
 

	
 
        // Doesn't exist, so instantiate a monomorph and determine its memory
 
        // layout.
 
        self.detect_and_resolve_type_loops_for(modules, heap, concrete_type)?;
 
        debug_assert_eq!(self.encountered_types[0].definition_id, definition_id);
 
        let mono_idx = self.encountered_types[0].monomorph_idx;
 
        self.lay_out_memory_for_encountered_types(arch);
 

	
 
        return Ok(mono_idx as i32);
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Building base types
 
    //--------------------------------------------------------------------------
 

	
 
    /// Builds the base type for an enum. Will not compute byte sizes
 
    fn build_base_enum_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base enum already built");
 
        let definition = ctx.heap[definition_id].as_enum();
 
        let root_id = definition.defined_in;
 

	
 
        // Determine enum variants
 
        let mut enum_value = -1;
 
        let mut variants = Vec::with_capacity(definition.variants.len());
 

	
 
        for variant in &definition.variants {
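 
            // Implicit variant values continue counting from the previous
 
            // variant's value; an explicit value below resets the counter.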
 
            if enum_value == i64::MAX {
 
                let source = &modules[definition.defined_in.index as usize].source;
 
                return Err(ParseError::new_error_str_at_span(
 
                    source, variant.identifier.span,
 
                    "this enum variant has an integer value that is too large"
 
                ));
 
            }
 

	
 
            enum_value += 1;
 
            if let EnumVariantValue::Integer(explicit_value) = variant.value {
 
                enum_value = explicit_value;
 
            }
 

	
 
            variants.push(EnumVariant{
 
                identifier: variant.identifier.clone(),
 
                value: enum_value,
 
            });
 
        }
 

	
 
        // Determine tag size
 
        let mut min_enum_value = 0;
 
        let mut max_enum_value = 0;
 
        if !variants.is_empty() {
 
            min_enum_value = variants[0].value;
 
            max_enum_value = variants[0].value;
 
            for variant in variants.iter().skip(1) {
 
                min_enum_value = min_enum_value.min(variant.value);
 
                max_enum_value = max_enum_value.max(variant.value);
 
            }
 
        }
 

	
 
        let (tag_type, size_and_alignment) = Self::variant_tag_type_from_values(min_enum_value, max_enum_value);
 

	
 
        // Enum names and polymorphic args do not conflict
 
        Self::check_identifier_collision(
 
            modules, root_id, &variants, |variant| &variant.identifier, "enum variant"
 
        )?;
 

	
 
        // Polymorphic arguments cannot appear as embedded types, because
 
        // they can only consist of integer variants.
 
        Self::check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 
        let poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 

	
 
        self.lookup.insert(definition_id, DefinedType {
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Enum(EnumType{
 
                variants,
 
                monomorphs: Vec::new(),
 
                minimum_tag_value: min_enum_value,
 
                maximum_tag_value: max_enum_value,
 
                tag_type,
 
                size: size_and_alignment,
 
                alignment: size_and_alignment
 
            }),
 
            poly_vars,
 
            is_polymorph: false,
 
        });
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Builds the base type for a union. Will compute byte sizes.
 
    fn build_base_union_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base union already built");
 
        let definition = ctx.heap[definition_id].as_union();
 
        let root_id = definition.defined_in;
 

	
 
        // Check all variants and their embedded types
 
        let mut variants = Vec::with_capacity(definition.variants.len());
 
        let mut tag_counter = 0;
 
        for variant in &definition.variants {
 
            for embedded in &variant.value {
 
                Self::check_member_parser_type(
 
                    modules, ctx, root_id, embedded, false
 
                )?;
 
            }
 

	
 
            variants.push(UnionVariant{
 
                identifier: variant.identifier.clone(),
 
                embedded: variant.value.clone(),
 
                tag_value: tag_counter,
 
            });
 
            tag_counter += 1;
 
        }
 

	
 
        let mut max_tag_value = 0;
 
        if tag_counter != 0 {
 
            max_tag_value = tag_counter - 1
 
        }
 

	
 
        let (tag_type, tag_size) = Self::variant_tag_type_from_values(0, max_tag_value);
 

	
 
        // Make sure there are no conflicts in identifiers
 
        Self::check_identifier_collision(
 
            modules, root_id, &variants, |variant| &variant.identifier, "union variant"
 
        )?;
 
        Self::check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct internal representation of union
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 
        for variant in &definition.variants {
 
            for embedded in &variant.value {
 
                Self::mark_used_polymorphic_variables(&mut poly_vars, embedded);
 
            }
 
        }
 

	
 
        let is_polymorph = poly_vars.iter().any(|arg| arg.is_in_use);
 

	
 
        self.lookup.insert(definition_id, DefinedType{
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Union(UnionType{
 
                variants,
 
                monomorphs: Vec::new(),
 
                tag_type,
 
                tag_size,
 
            }),
 
            poly_vars,
 
            is_polymorph
 
        });
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Builds base struct type. Will not compute byte sizes.
 
    fn build_base_struct_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base struct already built");
 
        let definition = ctx.heap[definition_id].as_struct();
 
        let root_id = definition.defined_in;
 

	
 
        // Check all struct fields and construct internal representation
 
        let mut fields = Vec::with_capacity(definition.fields.len());
 

	
 
        for field in &definition.fields {
 
            Self::check_member_parser_type(
 
                modules, ctx, root_id, &field.parser_type, false
 
            )?;
 

	
 
            fields.push(StructField{
 
                identifier: field.field.clone(),
 
                parser_type: field.parser_type.clone(),
 
            });
 
        }
 

	
 
        // Make sure there are no conflicting variables
 
        Self::check_identifier_collision(
 
            modules, root_id, &fields, |field| &field.identifier, "struct field"
 
        )?;
 
        Self::check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct base type in table
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 
        for field in &fields {
 
            Self::mark_used_polymorphic_variables(&mut poly_vars, &field.parser_type);
 
        }
 

	
 
        let is_polymorph = poly_vars.iter().any(|arg| arg.is_in_use);
 

	
 
        self.lookup.insert(definition_id, DefinedType{
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Struct(StructType{
 
                fields,
 
                monomorphs: Vec::new(),
 
            }),
 
            poly_vars,
 
            is_polymorph
 
        });
 

	
 
        return Ok(())
 
    }
 

	
 
    /// Builds base function type.
 
    fn build_base_function_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base function already built");
 
        let definition = ctx.heap[definition_id].as_function();
 
        let root_id = definition.defined_in;
 

	
 
        // Check and construct return types and argument types.
 
        debug_assert_eq!(definition.return_types.len(), 1, "not one return type"); // TODO: @ReturnValues
 
        for return_type in &definition.return_types {
 
            Self::check_member_parser_type(
 
                modules, ctx, root_id, return_type, definition.builtin
 
            )?;
 
        }
 

	
 
        let mut arguments = Vec::with_capacity(definition.parameters.len());
 
        for parameter_id in &definition.parameters {
 
            let parameter = &ctx.heap[*parameter_id];
 
            Self::check_member_parser_type(
 
                modules, ctx, root_id, &parameter.parser_type, definition.builtin
 
            )?;
 

	
 
            arguments.push(FunctionArgument{
 
                identifier: parameter.identifier.clone(),
 
                parser_type: parameter.parser_type.clone(),
 
            });
 
        }
 

	
 
        // Check conflict of identifiers
 
        Self::check_identifier_collision(
 
            modules, root_id, &arguments, |arg| &arg.identifier, "function argument"
 
        )?;
 
        Self::check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct internal representation of function type
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 
        for return_type in &definition.return_types {
 
            Self::mark_used_polymorphic_variables(&mut poly_vars, return_type);
 
        }
 
        for argument in &arguments {
 
            Self::mark_used_polymorphic_variables(&mut poly_vars, &argument.parser_type);
 
        }
 

	
 
        let is_polymorph = poly_vars.iter().any(|arg| arg.is_in_use);
 

	
 
        self.lookup.insert(definition_id, DefinedType{
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Function(FunctionType{
 
                return_types: definition.return_types.clone(),
 
                arguments,
 
                monomorphs: Vec::new(),
 
            }),
 
            poly_vars,
 
            is_polymorph
 
        });
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Builds base component type.
 
    fn build_base_component_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base component already built");
 

	
 
        let definition = ctx.heap[definition_id].as_component();
 
        let root_id = definition.defined_in;
 

	
 
        // Check the argument types
 
        let mut arguments = Vec::with_capacity(definition.parameters.len());
 
        for parameter_id in &definition.parameters {
 
            let parameter = &ctx.heap[*parameter_id];
 
            Self::check_member_parser_type(
 
                modules, ctx, root_id, &parameter.parser_type, false
 
            )?;
 

	
 
            arguments.push(FunctionArgument{
 
                identifier: parameter.identifier.clone(),
 
                parser_type: parameter.parser_type.clone(),
 
            });
 
        }
 

	
 
        // Check conflict of identifiers
 
        Self::check_identifier_collision(
 
            modules, root_id, &arguments, |arg| &arg.identifier, "connector argument"
 
        )?;
 
        Self::check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct internal representation of component
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 
        for argument in &arguments {
 
            Self::mark_used_polymorphic_variables(&mut poly_vars, &argument.parser_type);
 
        }
 

	
 
        let is_polymorph = poly_vars.iter().any(|arg| arg.is_in_use);
 

	
 
        self.lookup.insert(definition_id, DefinedType{
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Component(ComponentType{
 
                variant: definition.variant,
 
                arguments,
 
                monomorphs: Vec::new()
 
            }),
 
            poly_vars,
 
            is_polymorph
 
        });
 

	
 
        Ok(())
 
    }
 

	
 
    /// Will check if the member type (field of a struct, embedded type in a
 
    /// union variant) is valid.
 
    fn check_member_parser_type(
 
        modules: &[Module], ctx: &PassCtx, base_definition_root_id: RootId,
 
        member_parser_type: &ParserType, allow_special_compiler_types: bool
 
    ) -> Result<(), ParseError> {
 
        use ParserTypeVariant as PTV;
 

	
 
        for element in &member_parser_type.elements {
 
            match element.variant {
 
                // Special cases
 
                PTV::Void | PTV::InputOrOutput | PTV::ArrayLike | PTV::IntegerLike => {
 
                    if !allow_special_compiler_types {
 
                        unreachable!("compiler-only ParserTypeVariant in member type");
 
                    }
 
                },
 
                // Builtin types, always valid
 
                PTV::Message | PTV::Bool |
 
                PTV::UInt8 | PTV::UInt16 | PTV::UInt32 | PTV::UInt64 |
 
                PTV::SInt8 | PTV::SInt16 | PTV::SInt32 | PTV::SInt64 |
 
                PTV::Character | PTV::String |
 
                PTV::Array | PTV::Input | PTV::Output |
 
                // Likewise, polymorphic variables are always valid
 
                PTV::PolymorphicArgument(_, _) => {},
 
                // Types that are not constructable, or types that are not
 
                // allowed (and checked earlier)
 
                PTV::IntegerLiteral | PTV::Inferred => {
 
                    unreachable!("illegal ParserTypeVariant within type definition");
 
                },
 
                // Finally, user-defined types
 
                PTV::Definition(definition_id, _) => {
 
                    let definition = &ctx.heap[definition_id];
 
                    if !(definition.is_struct() || definition.is_enum() || definition.is_union()) {
 
                        let source = &modules[base_definition_root_id.index as usize].source;
 
                        return Err(ParseError::new_error_str_at_span(
 
                            source, element.element_span, "expected a datatype (a struct, enum or union)"
 
                        ));
 
                    }
 

	
 
                    // Otherwise, we're fine
 
                }
 
            }
 
        }
 

	
 
        // If here, then all elements check out
 
        return Ok(());
 
    }
 

	
 
    /// Go through a list of identifiers and ensure that all identifiers have
 
    /// unique names
 
    fn check_identifier_collision<T: Sized, F: Fn(&T) -> &Identifier>(
 
        modules: &[Module], root_id: RootId, items: &[T], getter: F, item_name: &'static str
 
    ) -> Result<(), ParseError> {
 
        for (item_idx, item) in items.iter().enumerate() {
 
            let item_ident = getter(item);
 
            for other_item in &items[0..item_idx] {
 
                let other_item_ident = getter(other_item);
 
                if item_ident == other_item_ident {
 
                    let module_source = &modules[root_id.index as usize].source;
 
                    return Err(ParseError::new_error_at_span(
 
                        module_source, item_ident.span, format!("This {} is defined more than once", item_name)
 
                    ).with_info_at_span(
 
                        module_source, other_item_ident.span, format!("The other {} is defined here", item_name)
 
                    ));
 
                }
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    /// Go through a list of polymorphic arguments and make sure that the
 
    /// arguments all have unique names, and the arguments do not conflict with
 
    /// any symbols defined at the module scope.
 
    fn check_poly_args_collision(
 
        modules: &[Module], ctx: &PassCtx, root_id: RootId, poly_args: &[Identifier]
 
    ) -> Result<(), ParseError> {
 
        // Make sure polymorphic arguments are unique and none of the
 
        // identifiers conflict with any imported scopes
 
        for (arg_idx, poly_arg) in poly_args.iter().enumerate() {
 
            for other_poly_arg in &poly_args[..arg_idx] {
 
                if poly_arg == other_poly_arg {
 
                    let module_source = &modules[root_id.index as usize].source;
 
                    return Err(ParseError::new_error_str_at_span(
 
                        module_source, poly_arg.span,
 
                        "This polymorphic argument is defined more than once"
 
                    ).with_info_str_at_span(
 
                        module_source, other_poly_arg.span,
 
                        "It conflicts with this polymorphic argument"
 
                    ));
 
                }
 
            }
 

	
 
            // Check if identifier conflicts with a symbol defined or imported
 
            // in the current module
 
            if let Some(symbol) = ctx.symbols.get_symbol_by_name(SymbolScope::Module(root_id), poly_arg.value.as_bytes()) {
 
                // We have a conflict
 
                let module_source = &modules[root_id.index as usize].source;
 
                let introduction_span = symbol.variant.span_of_introduction(ctx.heap);
 
                return Err(ParseError::new_error_str_at_span(
 
                    module_source, poly_arg.span,
 
                    "This polymorphic argument conflicts with another symbol"
 
                ).with_info_str_at_span(
 
                    module_source, introduction_span,
 
                    "It conflicts due to this symbol"
 
                ));
 
            }
 
        }
 

	
 
        // All arguments are fine
 
        Ok(())
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Detecting type loops
 
    //--------------------------------------------------------------------------
 

	
 
    /// Internal function that will detect type loops and check if they're
 
    /// resolvable. If so then the appropriate union variants will be marked as
 
    /// "living on heap". If not then a `ParseError` will be returned
 
    fn detect_and_resolve_type_loops_for(&mut self, modules: &[Module], heap: &Heap, concrete_type: ConcreteType) -> Result<(), ParseError> {
 
        use DefinedTypeVariant as DTV;
 

	
 
        debug_assert!(self.type_loop_breadcrumbs.is_empty());
 
        debug_assert!(self.type_loops.is_empty());
 
        debug_assert!(self.encountered_types.is_empty());
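 
        // The loop below performs an iterative depth-first traversal over the
 
        // members of each encountered monomorph: the breadcrumb array acts as
 
        // the explicit stack, and revisiting a type that is still on that
 
        // stack means a type loop has been found.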
 

	
 
        // Push the initial breadcrumb
 
        let initial_breadcrumb = self.check_member_for_type_loops(&concrete_type);
 
        if let TypeLoopResult::PushBreadcrumb(definition_id, concrete_type) = initial_breadcrumb {
 
            self.handle_new_breadcrumb_for_type_loops(definition_id, concrete_type);
 
        } else {
 
            unreachable!();
 
        }
 

	
 
        // Enter into the main resolving loop
 
        while !self.type_loop_breadcrumbs.is_empty() {
 
            // Because we might be modifying the breadcrumb array we need to
 
            let breadcrumb_idx = self.type_loop_breadcrumbs.len() - 1;
 
            let mut breadcrumb = self.type_loop_breadcrumbs[breadcrumb_idx].clone();
 

	
 
            let poly_type = self.lookup.get(&breadcrumb.definition_id).unwrap();
 
            let poly_type_definition_id = poly_type.ast_definition;
 

	
 
            let resolve_result = match &poly_type.definition {
 
                DTV::Enum(_) => {
 
                    TypeLoopResult::TypeExists
 
                },
 
                DTV::Union(definition) => {
 
                    let monomorph = &definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let num_variants = monomorph.variants.len();
 

	
 
                    let mut union_result = TypeLoopResult::TypeExists;
 

	
 
                    'member_loop: while breadcrumb.next_member < num_variants {
 
                        let mono_variant = &monomorph.variants[breadcrumb.next_member];
 
                        let num_embedded = mono_variant.embedded.len();
 

	
 
                        while breadcrumb.next_embedded < num_embedded {
 
                            let mono_embedded = &mono_variant.embedded[breadcrumb.next_embedded];
 
                            union_result = self.check_member_for_type_loops(&mono_embedded.concrete_type);
 

	
 
                            if union_result != TypeLoopResult::TypeExists {
 
                                // In type loop or new breadcrumb pushed, so
 
                                // break out of the resolving loop
 
                                break 'member_loop;
 
                            }
 

	
 
                            breadcrumb.next_embedded += 1;
 
                        }
 

	
 
                        breadcrumb.next_embedded = 0;
 
                        breadcrumb.next_member += 1
 
                    }
 

	
 
                    union_result
 
                },
 
                DTV::Struct(definition) => {
 
                    let monomorph = &definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let num_fields = monomorph.fields.len();
 

	
 
                    let mut struct_result = TypeLoopResult::TypeExists;
 
                    while breadcrumb.next_member < num_fields {
 
                        let mono_field = &monomorph.fields[breadcrumb.next_member];
 
                        struct_result = self.check_member_for_type_loops(&mono_field.concrete_type);
 

	
 
                        if struct_result != TypeLoopResult::TypeExists {
 
                            // Type loop or breadcrumb pushed, so break out of
 
                            // the resolving loop
 
                            break;
 
                        }
 

	
 
                        breadcrumb.next_member += 1;
 
                    }
 

	
 
                    struct_result
 
                },
 
                DTV::Function(_) | DTV::Component(_) => unreachable!(),
 
            };
 

	
 
            // Handle the result of attempting to resolve the current breadcrumb
 
            match resolve_result {
 
                TypeLoopResult::TypeExists => {
 
                    // We finished parsing the type
 
                    self.type_loop_breadcrumbs.pop();
 
                },
 
                TypeLoopResult::PushBreadcrumb(definition_id, concrete_type) => {
 
                    // We recurse into the member type.
 
                    self.type_loop_breadcrumbs[breadcrumb_idx] = breadcrumb;
 
                    self.handle_new_breadcrumb_for_type_loops(definition_id, concrete_type);
 
                },
 
                TypeLoopResult::TypeLoop(first_idx) => {
 
                    // Because we will be modifying breadcrumbs within the
 
                    // type-loop handling code, put back the modified breadcrumb
 
                    self.type_loop_breadcrumbs[breadcrumb_idx] = breadcrumb;
 

	
 
                    // We're in a type loop. Add the type loop
 
                    let mut loop_members = Vec::with_capacity(self.type_loop_breadcrumbs.len() - first_idx);
 
                    let mut contains_union = false;
 

	
 
                    for breadcrumb_idx in first_idx..self.type_loop_breadcrumbs.len() {
 
                        let breadcrumb = &mut self.type_loop_breadcrumbs[breadcrumb_idx];
 
                        let mut is_union = false;
 

	
 
                        let entry = self.lookup.get_mut(&breadcrumb.definition_id).unwrap();
 
                        match &mut entry.definition {
 
                            DTV::Union(definition) => {
 
                                // Mark the currently processed variant as requiring heap
 
                                // allocation, then advance the *embedded* type. The loop above
 
                                // will then take care of advancing it to the next *member*.
 
                                let monomorph = &mut definition.monomorphs[breadcrumb.monomorph_idx];
 
                                let variant = &mut monomorph.variants[breadcrumb.next_member];
 
                                variant.lives_on_heap = true;
 
                                breadcrumb.next_embedded += 1;
 
                                is_union = true;
 
                                contains_union = true;
 
                            },
 
                            _ => {}, // else: we don't care for now
 
                        }
 

	
 
                        loop_members.push(TypeLoopEntry{
 
                            definition_id: breadcrumb.definition_id,
 
                            monomorph_idx: breadcrumb.monomorph_idx,
 
                            is_union
 
                        });
 
                    }
 

	
 
                    let new_type_loop = TypeLoop{ members: loop_members };
 
                    if !contains_union {
 
                        // No way to (potentially) break the union. So return a
 
                        // type loop error. This is because otherwise our
 
                        // breadcrumb resolver ends up in an infinite loop.
 
                        return Err(construct_type_loop_error(
 
                            self, &new_type_loop, modules, heap
 
                        ));
 
                    }
 

	
 
                    self.type_loops.push(new_type_loop);
 
                }
 
            }
 
        }
 

	
 
        // All breadcrumbs have been cleared. So now `type_loops` contains all
 
        // of the encountered type loops, and `encountered_types` contains a
 
        // list of all unique monomorphs we encountered.
 

	
 
        // The next step is to figure out if all of the type loops can be
 
        // broken. A type loop can be broken if at least one union exists in the
 
        // loop and that union ended up having variants that are not part of
 
        // a type loop.
 
        fn type_loop_source_span_and_message<'a>(
 
            modules: &'a [Module], heap: &Heap, defined_type: &DefinedType, monomorph_idx: usize, index_in_loop: usize
 
        ) -> (&'a InputSource, InputSpan, String) {
 
            // Note: we will discover the type loop the *first* time we
 
            // instantiate a monomorph with the provided polymorphic arguments
 
            // (not all arguments are actually used in the type), so we don't
 
            // have to care about a second instantiation where certain unused
 
            // polymorphic arguments are different.
 
            let monomorph_type = match &defined_type.definition {
 
                DTV::Union(definition) => &definition.monomorphs[monomorph_idx].concrete_type,
 
                DTV::Struct(definition) => &definition.monomorphs[monomorph_idx].concrete_type,
 
                DTV::Enum(_) | DTV::Function(_) | DTV::Component(_) =>
 
                    unreachable!(), // impossible to have an enum/procedure in a type loop
 
            };
 

	
 
            let type_name = monomorph_type.display_name(&heap);
 
            let message = if index_in_loop == 0 {
 
                format!(
 
                    "encountered an infinitely large type for '{}' (which can be fixed by \
 
                    introducing a union type that has a variant whose embedded types are \
 
                    not part of a type loop, or do not have embedded types)",
 
                    type_name
 
                )
 
            } else if index_in_loop == 1 {
 
                format!("because it depends on the type '{}'", type_name)
 
            } else {
 
                format!("which depends on the type '{}'", type_name)
 
            };
 

	
 
            let ast_definition = &heap[defined_type.ast_definition];
 
            let ast_root_id = ast_definition.defined_in();
 

	
 
            return (
 
                &modules[ast_root_id.index as usize].source,
 
                ast_definition.identifier().span,
 
                message
 
            );
 
        }
 

	
 
        fn construct_type_loop_error(table: &TypeTable, type_loop: &TypeLoop, modules: &[Module], heap: &Heap) -> ParseError {
 
            let first_entry = &type_loop.members[0];
 
            let first_type = table.lookup.get(&first_entry.definition_id).unwrap();
 
            let (first_module, first_span, first_message) = type_loop_source_span_and_message(
 
                modules, heap, first_type, first_entry.monomorph_idx, 0
 
            );
 
            let mut parse_error = ParseError::new_error_at_span(first_module, first_span, first_message);
 

	
 
            for member_idx in 1..type_loop.members.len() {
 
                let entry = &type_loop.members[member_idx];
 
                let entry_type = table.lookup.get(&entry.definition_id).unwrap();
 
                let (module, span, message) = type_loop_source_span_and_message(
 
                    modules, heap, entry_type, entry.monomorph_idx, member_idx
 
                );
 
                parse_error = parse_error.with_info_at_span(module, span, message);
 
            }
 

	
 
            parse_error
 
        }
 

	
 
        for type_loop in &self.type_loops {
 
            let mut can_be_broken = false;
 
            debug_assert!(!type_loop.members.is_empty());
 

	
 
            for entry in &type_loop.members {
 
                if entry.is_union {
 
                    let base_type = self.lookup.get(&entry.definition_id).unwrap();
 
                    let monomorph = &base_type.definition.as_union().monomorphs[entry.monomorph_idx];
 

	
 
                    debug_assert!(!monomorph.variants.is_empty()); // otherwise it couldn't be part of the type loop
 
                    let has_stack_variant = monomorph.variants.iter().any(|variant| !variant.lives_on_heap);
 
                    if has_stack_variant {
 
                        can_be_broken = true;
 
                    }
 
                }
 
            }
 

	
 
            if !can_be_broken {
 
                // Construct a type loop error
 
                return Err(construct_type_loop_error(self, type_loop, modules, heap));
 
            }
 
        }
 

	
 
        // If here, then all type loops have been resolved and we can lay out
 
        // all of the members
 
        self.type_loops.clear();
 

	
 
        return Ok(());
 
    }
 

	
 
    /// Checks if the specified type needs to be resolved (i.e. we need to push
 
    /// a breadcrumb), is already resolved (i.e. we can continue with the next
 
    /// member of the currently considered type) or is in the process of being
 
    /// resolved (i.e. we're in a type loop). Because of borrowing rules we
 
    /// don't do any modifications of internal types here. Hence: if we
 
    /// return `PushBreadcrumb` then call `handle_new_breadcrumb_for_type_loops`
 
    /// to take care of storing the appropriate types.
 
    fn check_member_for_type_loops(&self, definition_type: &ConcreteType) -> TypeLoopResult {
 
        use ConcreteTypePart as CTP;
 

	
 
        // We're only interested in user-defined types, so exit if it is a
 
        // builtin of some sort.
 
        debug_assert!(!definition_type.parts.is_empty());
 
        let definition_id = match &definition_type.parts[0] {
 
            CTP::Instance(definition_id, _) |
 
            CTP::Function(definition_id, _) |
 
            CTP::Component(definition_id, _) => {
 
                *definition_id
 
            },
 
            _ => {
 
                return TypeLoopResult::TypeExists
 
            },
 
        };
 

	
 
        let base_type = self.lookup.get(&definition_id).unwrap();
 
        if let Some(mono_idx) = base_type.get_monomorph_index(&definition_type) {
 
            // Monomorph is already known. Check if it is present in the
 
            // breadcrumbs. If so, then we are in a type loop
 
            for (breadcrumb_idx, breadcrumb) in self.type_loop_breadcrumbs.iter().enumerate() {
 
                if breadcrumb.definition_id == definition_id && breadcrumb.monomorph_idx == mono_idx {
 
                    return TypeLoopResult::TypeLoop(breadcrumb_idx);
 
                }
 
            }
 

	
 
            return TypeLoopResult::TypeExists;
 
        }
 

	
 
        // Type is not yet known, so we need to insert it into the lookup and
 
        // push a new breadcrumb.
 
        return TypeLoopResult::PushBreadcrumb(definition_id, definition_type.clone());
 
    }
 

	
 
    /// Handles the `PushBreadcrumb` result for a `check_member_for_type_loops`
 
    /// call.
 
    fn handle_new_breadcrumb_for_type_loops(&mut self, definition_id: DefinitionId, definition_type: ConcreteType) {
 
        use DefinedTypeVariant as DTV;
 

	
 
        let base_type = self.lookup.get_mut(&definition_id).unwrap();
 
        let mut is_union = false;
 
        let monomorph_idx = match &mut base_type.definition {
 
            DTV::Enum(definition) => {
 
                debug_assert!(definition.monomorphs.is_empty());
 
                definition.monomorphs.push(EnumMonomorph{
 
                    concrete_type: definition_type,
 
                });
 
                0
 
            },
 
            DTV::Union(definition) => {
 
                // Create all the variants with their concrete types
 
                let mut mono_variants = Vec::with_capacity(definition.variants.len());
 
                for poly_variant in &definition.variants {
 
                    let mut mono_embedded = Vec::with_capacity(poly_variant.embedded.len());
 
                    for poly_embedded in &poly_variant.embedded {
 
                        let mono_concrete = Self::construct_concrete_type(poly_embedded, &definition_type);
 
                        mono_embedded.push(UnionMonomorphEmbedded{
 
                            concrete_type: mono_concrete,
 
                            size: 0,
 
                            alignment: 0,
 
                            offset: 0
 
                        });
 
                    }
 

	
 
                    mono_variants.push(UnionMonomorphVariant{
 
                        lives_on_heap: false,
 
                        embedded: mono_embedded,
 
                    })
 
                }
 

	
 
                let mono_idx = definition.monomorphs.len();
 
                definition.monomorphs.push(UnionMonomorph{
 
                    concrete_type: definition_type,
 
                    variants: mono_variants,
 
                    stack_size: 0,
 
                    stack_alignment: 0,
 
                    heap_size: 0,
 
                    heap_alignment: 0
 
                });
 

	
 
                is_union = true;
 
                mono_idx
 
            },
 
            DTV::Struct(definition) => {
 
                let mut mono_fields = Vec::with_capacity(definition.fields.len());
 
                for poly_field in &definition.fields {
 
                    let mono_concrete = Self::construct_concrete_type(&poly_field.parser_type, &definition_type);
 
                    mono_fields.push(StructMonomorphField{
 
                        concrete_type: mono_concrete,
 
                        size: 0,
 
                        alignment: 0,
 
                        offset: 0
 
                    })
 
                }
 

	
 
                let mono_idx = definition.monomorphs.len();
 
                definition.monomorphs.push(StructMonomorph{
 
                    concrete_type: definition_type,
 
                    fields: mono_fields,
 
                    size: 0,
 
                    alignment: 0
 
                });
 

	
 
                mono_idx
 
            },
 
            DTV::Function(_) | DTV::Component(_) => {
 
                unreachable!("pushing type resolving breadcrumb for procedure type")
 
            },
 
        };
 

	
 
        self.encountered_types.push(TypeLoopEntry{
 
            definition_id,
 
            monomorph_idx,
 
            is_union,
 
        });
 

	
 
        self.type_loop_breadcrumbs.push(TypeLoopBreadcrumb{
 
            definition_id,
 
            monomorph_idx,
 
            next_member: 0,
 
            next_embedded: 0,
 
        });
 
    }
 

	
 
    /// Constructs a concrete type out of a parser type for a struct field or
 
    /// union embedded type. It will do this by looking up the polymorphic
 
    /// variables in the supplied concrete type. The assumption is that the
 
    /// polymorphic variable's indices correspond to the subtrees in the
 
    /// concrete type.
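    ///
    /// Illustrative example (hypothetical types): for a member declared as
    /// `T[]` inside a container instantiated as `Holder<u32>`, the builtin
    /// `Array` part is copied directly and the polymorphic argument at index 0
    /// is replaced by the container's first embedded subtree, yielding the
    /// concrete parts `[Array, UInt32]`.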
 
    fn construct_concrete_type(member_type: &ParserType, container_type: &ConcreteType) -> ConcreteType {
 
        use ParserTypeVariant as PTV;
 
        use ConcreteTypePart as CTP;
 

	
 
        // TODO: Combine with code in pass_typing.rs
 
        fn parser_to_concrete_part(part: &ParserTypeVariant) -> Option<ConcreteTypePart> {
 
            match part {
 
                PTV::Void      => Some(CTP::Void),
 
                PTV::Message   => Some(CTP::Message),
 
                PTV::Bool      => Some(CTP::Bool),
 
                PTV::UInt8     => Some(CTP::UInt8),
 
                PTV::UInt16    => Some(CTP::UInt16),
 
                PTV::UInt32    => Some(CTP::UInt32),
 
                PTV::UInt64    => Some(CTP::UInt64),
 
                PTV::SInt8     => Some(CTP::SInt8),
 
                PTV::SInt16    => Some(CTP::SInt16),
 
                PTV::SInt32    => Some(CTP::SInt32),
 
                PTV::SInt64    => Some(CTP::SInt64),
 
                PTV::Character => Some(CTP::Character),
 
                PTV::String    => Some(CTP::String),
 
                PTV::Array     => Some(CTP::Array),
 
                PTV::Input     => Some(CTP::Input),
 
                PTV::Output    => Some(CTP::Output),
 
                PTV::Definition(definition_id, num) => Some(CTP::Instance(*definition_id, *num)),
 
                _              => None
 
            }
 
        }
 

	
 
        let mut parts = Vec::with_capacity(member_type.elements.len()); // usually the right capacity, but not guaranteed
 
        for member_part in &member_type.elements {
 
            // Check if we have a regular builtin type
 
            if let Some(part) = parser_to_concrete_part(&member_part.variant) {
 
                parts.push(part);
 
                continue;
 
            }
 

	
 
            // Not builtin, but if all code is working correctly, we only care
 
            // about the polymorphic argument at this point.
 
            if let PTV::PolymorphicArgument(_container_definition_id, poly_arg_idx) = member_part.variant {
 
                debug_assert_eq!(_container_definition_id, get_concrete_type_definition(container_type));
 

	
 
                let mut container_iter = container_type.embedded_iter(0);
 
                for _ in 0..poly_arg_idx {
 
                    container_iter.next();
 
                }
 

	
 
                let poly_section = container_iter.next().unwrap();
 
                parts.extend(poly_section);
 

	
 
                continue;
 
            }
 

	
 
            unreachable!("unexpected type part {:?} from {:?}", member_part, member_type);
 
        }
 

	
 
        return ConcreteType{ parts };
 
    }
 

	
 
    //--------------------------------------------------------------------------
 
    // Determining memory layout for types
 
    //--------------------------------------------------------------------------
 

	
 
    fn lay_out_memory_for_encountered_types(&mut self, arch: &TargetArch) {
 
        use DefinedTypeVariant as DTV;
 

	
 
        // Just finished type loop detection, so we're left with the encountered
 
        // types only
 
        debug_assert!(self.type_loops.is_empty());
 
        debug_assert!(!self.encountered_types.is_empty());
 
        debug_assert!(self.memory_layout_breadcrumbs.is_empty());
 
        debug_assert!(self.size_alignment_stack.is_empty());
 

	
 
        // Push the first entry (the type we originally started with when we
 
        // were detecting type loops)
 
        let first_entry = &self.encountered_types[0];
 
        self.memory_layout_breadcrumbs.push(MemoryBreadcrumb{
 
            definition_id: first_entry.definition_id,
 
            monomorph_idx: first_entry.monomorph_idx,
 
            next_member: 0,
 
            next_embedded: 0,
 
            first_size_alignment_idx: 0,
 
        });
 

	
 
        // Enter the main resolving loop
 
        'breadcrumb_loop: while !self.memory_layout_breadcrumbs.is_empty() {
 
            let cur_breadcrumb_idx = self.memory_layout_breadcrumbs.len() - 1;
 
            let mut breadcrumb = self.memory_layout_breadcrumbs[cur_breadcrumb_idx].clone();
 

	
 
            let poly_type = self.lookup.get(&breadcrumb.definition_id).unwrap();
 
            match &poly_type.definition {
 
                DTV::Enum(definition) => {
 
                    // Size should already be computed
 
                    debug_assert!(definition.size != 0 && definition.alignment != 0);
 
                },
 
                DTV::Union(definition) => {
 
                    // Retrieve size/alignment of each embedded type. We do not
 
                    // compute the offsets or total type sizes yet.
 
                    let mono_type = &definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let num_variants = mono_type.variants.len();
 
                    while breadcrumb.next_member < num_variants {
 
                        let mono_variant = &mono_type.variants[breadcrumb.next_member];
 

	
 
                        if mono_variant.lives_on_heap {
 
                            // To prevent type loops we made this a heap-
 
                            // allocated variant. This implies we cannot
 
                            // compute sizes of members at this point.
 
                        } else {
 
                            let num_embedded = mono_variant.embedded.len();
 
                            while breadcrumb.next_embedded < num_embedded {
 
                                let mono_embedded = &mono_variant.embedded[breadcrumb.next_embedded];
 
                                match self.get_memory_layout_or_breadcrumb(arch, &mono_embedded.concrete_type) {
 
                                    MemoryLayoutResult::TypeExists(size, alignment) => {
 
                                        self.size_alignment_stack.push((size, alignment));
 
                                    },
 
                                    MemoryLayoutResult::PushBreadcrumb(new_breadcrumb) => {
 
                                        self.memory_layout_breadcrumbs[cur_breadcrumb_idx] = breadcrumb;
 
                                        self.memory_layout_breadcrumbs.push(new_breadcrumb);
 
                                        continue 'breadcrumb_loop;
 
                                    }
 
                                }
 

	
 
                                breadcrumb.next_embedded += 1;
 
                            }
 
                        }
 

	
 
                        breadcrumb.next_member += 1;
 
                        breadcrumb.next_embedded = 0;
 
                    }
 

	
 
                    // If here then we can at least compute the stack size of
 
                    // the type, we'll have to come back at the very end to
 
                    // fill in the heap size/alignment/offset of each heap-
 
                    // allocated variant.
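                    // Illustrative example (assuming a 1-byte tag and a single
                    // embedded member with size 4 and alignment 4): the member
                    // is aligned from offset 1 up to offset 4 and ends at 8,
                    // so this variant contributes a size of 8 and an alignment
                    // of 4 to the maxima computed below.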
 
                    let mut max_size = definition.tag_size;
 
                    let mut max_alignment = definition.tag_size;
 

	
 
                    let poly_type = self.lookup.get_mut(&breadcrumb.definition_id).unwrap();
 
                    let definition = poly_type.definition.as_union_mut();
 
                    let mono_type = &mut definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let mut size_alignment_idx = breadcrumb.first_size_alignment_idx;
 

	
 
                    for variant in &mut mono_type.variants {
 
                        // We're doing stack computations, so always start with
 
                        // the tag size/alignment.
 
                        let mut variant_offset = definition.tag_size;
 
                        let mut variant_alignment = definition.tag_size;
 

	
 
                        if variant.lives_on_heap {
 
                            // Variant lives on heap, so just a pointer
 
                            let (ptr_size, ptr_align) = arch.pointer_size_alignment;
 
                            align_offset_to(&mut variant_offset, ptr_align);
 

	
 
                            variant_offset += ptr_size;
 
                            variant_alignment = variant_alignment.max(ptr_align);
 
                        } else {
 
                            // Variant lives on stack, so walk all embedded
 
                            // types.
 
                            for embedded in &mut variant.embedded {
 
                                let (size, alignment) = self.size_alignment_stack[size_alignment_idx];
 
                                embedded.size = size;
 
                                embedded.alignment = alignment;
 
                                size_alignment_idx += 1;
 

	
 
                                align_offset_to(&mut variant_offset, alignment);
 
                                embedded.offset = variant_offset;
 

	
 
                                variant_offset += size;
 
                                variant_alignment = variant_alignment.max(alignment);
 
                            }
 
                        };
 

	
 
                        max_size = max_size.max(variant_offset);
 
                        max_alignment = max_alignment.max(variant_alignment);
 
                    }
 

	
 
                    mono_type.stack_size = max_size;
 
                    mono_type.stack_alignment = max_alignment;
 
                    self.size_alignment_stack.truncate(breadcrumb.first_size_alignment_idx);
 
                },
 
                DTV::Struct(definition) => {
 
                    // Retrieve size and alignment of each struct member. We'll
 
                    // compute the offsets once all of those are known
 
                    let mono_type = &definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let num_fields = mono_type.fields.len();
 
                    while breadcrumb.next_member < num_fields {
 
                        let mono_field = &mono_type.fields[breadcrumb.next_member];
 

	
 
                        match self.get_memory_layout_or_breadcrumb(arch, &mono_field.concrete_type) {
 
                            MemoryLayoutResult::TypeExists(size, alignment) => {
 
                                self.size_alignment_stack.push((size, alignment))
 
                            },
 
                            MemoryLayoutResult::PushBreadcrumb(new_breadcrumb) => {
 
                                self.memory_layout_breadcrumbs[cur_breadcrumb_idx] = breadcrumb;
 
                                self.memory_layout_breadcrumbs.push(new_breadcrumb);
 
                                continue 'breadcrumb_loop;
 
                            },
 
                        }
 

	
 
                        breadcrumb.next_member += 1;
 
                    }
 

	
 
                    // Compute offsets and size of total type
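                    // Illustrative example (assuming u8 has size/alignment 1
                    // and u32 has size/alignment 4 on the target arch): fields
                    // (u8, u32) are placed at offsets 0 and 4, giving the
                    // monomorph a size of 8 and an alignment of 4.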
 
                    let mut cur_offset = 0;
 
                    let mut max_alignment = 1;
 

	
 
                    let poly_type = self.lookup.get_mut(&breadcrumb.definition_id).unwrap();
 
                    let definition = poly_type.definition.as_struct_mut();
 
                    let mono_type = &mut definition.monomorphs[breadcrumb.monomorph_idx];
 
                    let mut size_alignment_idx = breadcrumb.first_size_alignment_idx;
 

	
 
                    for field in &mut mono_type.fields {
 
                        let (size, alignment) = self.size_alignment_stack[size_alignment_idx];
 
                        field.size = size;
 
                        field.alignment = alignment;
 
                        size_alignment_idx += 1;
 

	
 
                        align_offset_to(&mut cur_offset, alignment);
 
                        field.offset = cur_offset;
 

	
 
                        cur_offset += size;
 
                        max_alignment = max_alignment.max(alignment);
 
                    }
 

	
 
                    mono_type.size = cur_offset;
 
                    mono_type.alignment = max_alignment;
src/protocol/tests/eval_silly.rs
Show inline comments
 
use super::*;
 

	
 
#[test]
 
fn test_concatenate_operator() {
 
    Tester::new_single_source_expect_ok(
 
        "concatenate and check values",
 
        "
 
        // To see if we accept the polymorphic arg
 
        func check_pair<T>(T[] arr, u32 idx) -> bool {
 
            return arr[idx] == arr[idx + 1];
 
        }
 

	
 
        // To see if we can check fields of polymorphs
 
        func check_values<T>(T[] arr, u32 idx, u32 x, u32 y) -> bool {
 
            auto value = arr[idx];
 
            return value.x == x && value.y == y;
 
        }
 

	
 
        struct Point2D {
 
            u32 x,
 
            u32 y,
 
        }
 

	
 
        // Could do this inline, but we're attempting to stress the system a bit
 
        func create_point(u32 x, u32 y) -> Point2D {
 
            return Point2D{ x: x, y: y };
 
        }
 

	
 
        // Again, more stressing: returning a heap-allocated thing
 
        func create_array() -> Point2D[] {
 
            return {
 
                create_point(1, 2),
 
                create_point(1, 2),
 
                create_point(3, 4),
 
                create_point(3, 4)
 
            };
 
        }
 

	
 
        // The silly checkamajig
 
        func foo() -> bool {
 
            auto lhs = create_array();
 
            auto rhs = create_array();
 
            auto total = lhs @ rhs;
 
            auto is_equal =
 
                check_pair(total, 0) &&
 
                check_pair(total, 2) &&
 
                check_pair(total, 4) &&
 
                check_pair(total, 6);
 
            auto is_not_equal =
 
                !check_pair(total, 0) ||
 
                !check_pair(total, 2) ||
 
                !check_pair(total, 4) ||
 
                !check_pair(total, 6);
 
            auto has_correct_fields =
 
                check_values(total, 3, 3, 4) &&
 
                check_values(total, 4, 1, 2);
 
            auto array_check = lhs == rhs && total == total;
 
            return is_equal && !is_not_equal && has_correct_fields && array_check;
 
        }
 
        "
 
    ).for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_slicing_magic() {
 
    Tester::new_single_source_expect_ok("slicing", "
 
        struct Holder<T> {
 
            T[] left,
 
            T[] right,
 
        }
 

	
 
        func create_array<T>(T first_index, T last_index) -> T[] {
 
            auto result = {};
 
            while (first_index < last_index) {
 
                // Absolutely ridiculous, but we don't have builtin array functions yet...
 
                result = result @ { first_index };
 
                first_index += 1;
 
            }
 
            return result;
 
        }
 

	
 
        func create_holder<T>(T left_first, T left_last, T right_first, T right_last) -> Holder<T> {
 
            return Holder{
 
                left: create_array(left_first, left_last),
 
                right: create_array(right_first, right_last)
 
            };
 
        }
 

	
 
        // Another silly thing, we first slice the full thing. Then subslice a single
 
        // element, then concatenate. We always return an array of two things.
 
        func slicing_magic<T>(Holder<T> holder, u32 left_base, u32 left_amount, u32 right_base, u32 right_amount) -> T[] {
 
            auto left = holder.left[left_base..left_base + left_amount];
 
            auto right = holder.right[right_base..right_base + right_amount];
 
            return left[0..1] @ right[0..1];
 
        }
 

	
 
        func foo() -> bool {
 
            // Preliminaries:
 
            // 1. construct a holder, where:
 
            //      - left array will be [0, 1, 2, ...]
 
            //      - right array will be [2, 3, 4, ...]
 
            // 2. Perform slicing magic, always returning an array [3, 3]
 
            // 3. Make sure result is 6
 

	
 
            // But of course, because we want to be silly, we will check this for
 
            // any possible integer type.
 
            auto created_u08 = create_holder<u8> (0, 5, 2, 8);
 
            auto created_u16 = create_holder<u16>(0, 5, 2, 8);
 
            auto created_u32 = create_holder<u32>(0, 5, 2, 8);
 
            auto created_u64 = create_holder<u64>(0, 5, 2, 8);
 

	
 
            auto result_u08 = slicing_magic(created_u08, 3, 2, 1, 2);
 
            auto result_u16 = slicing_magic(created_u16, 3, 2, 1, 2);
 
            auto result_u32 = slicing_magic(created_u32, 3, 2, 1, 2);
 
            auto result_u64 = slicing_magic(created_u64, 3, 2, 1, 2);
 

	
 
            auto result_s08 = slicing_magic(create_holder<s8> (0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s16 = slicing_magic(create_holder<s16>(0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s32 = slicing_magic(create_holder<s32>(0, 5, 2, 8), 3, 2, 1, 2);
 
            auto result_s64 = slicing_magic(create_holder<s64>(0, 5, 2, 8), 3, 2, 1, 2);
 

	
 
            return
 
                result_u08[0] + result_u08[1] == 6 &&
 
                result_u16[0] + result_u16[1] == 6 &&
 
                result_u32[0] + result_u32[1] == 6 &&
 
                result_u64[0] + result_u64[1] == 6 &&
 
                result_s08[0] + result_s08[1] == 6 &&
 
                result_s16[0] + result_s16[1] == 6 &&
 
                result_s32[0] + result_s32[1] == 6 &&
 
                result_s64[0] + result_s64[1] == 6;
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    }).for_struct("Holder", |s| { s
 
        .assert_num_monomorphs(8)
 
        .assert_has_monomorph("u8")
 
        .assert_has_monomorph("u16")
 
        .assert_has_monomorph("u32")
 
        .assert_has_monomorph("u64")
 
        .assert_has_monomorph("s8")
 
        .assert_has_monomorph("s16")
 
        .assert_has_monomorph("s32")
 
        .assert_has_monomorph("s64");
 
        .assert_has_monomorph("Holder<u8>")
 
        .assert_has_monomorph("Holder<u16>")
 
        .assert_has_monomorph("Holder<u32>")
 
        .assert_has_monomorph("Holder<u64>")
 
        .assert_has_monomorph("Holder<s8>")
 
        .assert_has_monomorph("Holder<s16>")
 
        .assert_has_monomorph("Holder<s32>")
 
        .assert_has_monomorph("Holder<s64>");
 
    });
 
}
 

	
 
#[test]
 
fn test_struct_fields() {
 
    Tester::new_single_source_expect_ok("struct field access", "
 
        struct Nester<T> {
 
            T v,
 
        }
 

	
 
        func make<T>(T inner) -> Nester<T> {
 
            return Nester{ v: inner };
 
        }
 

	
 
        func modify<T>(Nester<T> outer, T inner) -> Nester<T> {
 
            outer.v = inner;
 
            return outer;
 
        }
 

	
 
        func foo() -> bool {
 
            // Single depth modification
 
            auto original1 = make<u32>(5);
 
            auto modified1 = modify(original1, 2);
 
            auto success1 = original1.v == 5 && modified1.v == 2;
 

	
 
            // Multiple levels of modification
 
            auto original2 = make(make(make(make(true))));
 
            auto modified2 = modify(original2.v, make(make(false))); // strip one Nester level
 
            auto success2 = original2.v.v.v.v == true && modified2.v.v.v == false;
 

	
 
            return success1 && success2;
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_field_selection_polymorphism() {
 
    // Bit silly, but just to be sure
 
    Tester::new_single_source_expect_ok("struct field shuffles", "
 
        struct VecXYZ<T> { T x, T y, T z }
 
        struct VecYZX<T> { T y, T z, T x }
 
        struct VecZXY<T> { T z, T x, T y }
 

	
 
        func modify_x<T>(T input) -> T {
 
            input.x = 1337;
 
            return input;
 
        }
 

	
 
        func foo() -> bool {
 
            auto xyz = VecXYZ<u16>{ x: 1, y: 2, z: 3 };
 
            auto yzx = VecYZX<u32>{ y: 2, z: 3, x: 1 };
 
            auto zxy = VecZXY<u64>{ x: 1, y: 2, z: 3 }; // different initialization order
 

	
 
            auto mod_xyz = modify_x(xyz);
 
            auto mod_yzx = modify_x(yzx);
 
            auto mod_zxy = modify_x(zxy);
 

	
 
            return
 
                xyz.x == 1 && xyz.y == 2 && xyz.z == 3 &&
 
                yzx.x == 1 && yzx.y == 2 && yzx.z == 3 &&
 
                zxy.x == 1 && zxy.y == 2 && zxy.z == 3 &&
 
                mod_xyz.x == 1337 && mod_xyz.y == 2 && mod_xyz.z == 3 &&
 
                mod_yzx.x == 1337 && mod_yzx.y == 2 && mod_yzx.z == 3 &&
 
                mod_zxy.x == 1337 && mod_zxy.y == 2 && mod_zxy.z == 3;
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 

	
 
#[test]
 
fn test_index_error() {
 
    Tester::new_single_source_expect_ok("indexing error", "
 
        func check_array(u32[] vals, u32 idx) -> u32 {
 
            return vals[idx];
 
        }
 

	
 
        func foo() -> u32 {
 
            auto array = {1, 2, 3, 4, 5, 6, 7};
 
            check_array(array, 7);
 
            return array[0];
 
        }
 
    ").for_function("foo", |f| {
 
        f.call_err("index 7 is out of bounds: array length is 7");
 
    });
 
}
 
\ No newline at end of file