/// pass_typing /// /// Performs type inference and type checking. Type inference is implemented by /// applying constraints on (sub)trees of types. During this process the /// resolver takes the `ParserType` structs (the representation of the types /// written by the programmer), converts them to `InferenceType` structs (the /// temporary data structure used during type inference) and attempts to arrive /// at `ConcreteType` structs (the representation of a fully checked and /// validated type). /// /// The resolver will visit every statement and expression relevant to the /// procedure and insert and determine its initial type based on context (e.g. a /// return statement's expression must match the function's return type, an /// if statement's test expression must evaluate to a boolean). When all are /// visited we attempt to make progress in evaluating the types. Whenever a type /// is progressed we queue the related expressions for further type progression. /// Once no more expressions are in the queue the algorithm is finished. At this /// point either all types are inferred (or can be trivially implicitly /// determined), or we have incomplete types. In the latter case we return an /// error. /// /// TODO: Needs a thorough rewrite: /// 0. polymorph_progress is intentionally broken at the moment. Make it work /// again and use a normal VecSomething. /// 1. The foundation for doing all of the work with predetermined indices /// instead of with HashMaps is there, but it is not really used because of /// time constraints. When time is available, rewrite the system such that /// AST IDs are not needed, and only indices into arrays are used. /// 2. Remove the `msg` type? /// 3. Disallow certain types in certain operations (e.g. `Void`). macro_rules! debug_log_enabled { () => { false }; } macro_rules! debug_log { ($format:literal) => { enabled_debug_print!(false, "types", $format); }; ($format:literal, $($args:expr),*) => { enabled_debug_print!(false, "types", $format, $($args),*); }; } use crate::collections::{ScopedBuffer, ScopedSection, DequeSet}; use crate::protocol::ast::*; use crate::protocol::input_source::ParseError; use crate::protocol::parser::ModuleCompilationPhase; use crate::protocol::parser::type_table::*; use crate::protocol::parser::token_parsing::*; use super::visitor::{ BUFFER_INIT_CAP_LARGE, BUFFER_INIT_CAP_SMALL, Ctx, }; // ----------------------------------------------------------------------------- // Inference type // ----------------------------------------------------------------------------- const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ]; const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ]; const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ]; const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ]; const STRING_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::String, InferenceTypePart::Character ]; const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ]; const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ]; const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ]; const SLICE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Slice, InferenceTypePart::Unknown ]; const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ]; /// TODO: @performance Turn into PartialOrd+Ord to simplify checks #[derive(Debug, Clone, Eq, PartialEq)] pub(crate) enum InferenceTypePart { // When we infer types of AST elements that support polymorphic arguments, // then we might have the case that multiple embedded types depend on the // polymorphic type (e.g. func bla(T a, T[] b) -> T[][]). If we can infer // the type in one place (e.g. argument a), then we may propagate this // information to other types (e.g. argument b and the return type). For // this reason we place markers in the `InferenceType` instances such that // we know which part of the type was originally a polymorphic argument. Marker(u32), // Completely unknown type, needs to be inferred Unknown, // Partially known type, may be inferred to to be the appropriate related // type. // IndexLike, // index into array/slice NumberLike, // any kind of integer/float IntegerLike, // any kind of integer ArrayLike, // array or slice. Note that this must have a subtype PortLike, // input or output port // Special types that cannot be instantiated by the user Void, // For builtin functions that do not return anything // Concrete types without subtypes Bool, UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64, Character, String, // One subtype Message, Array, Slice, Input, Output, // Tuple with any number of subtypes (for practical reasons 1 element is impossible) Tuple(u32), // A user-defined type with any number of subtypes Instance(DefinitionId, u32) } impl InferenceTypePart { fn is_marker(&self) -> bool { match self { InferenceTypePart::Marker(_) => true, _ => false, } } /// Checks if the type is concrete, markers are interpreted as concrete /// types. fn is_concrete(&self) -> bool { use InferenceTypePart as ITP; match self { ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => false, _ => true } } fn is_concrete_number(&self) -> bool { use InferenceTypePart as ITP; match self { ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 | ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true, _ => false, } } fn is_concrete_integer(&self) -> bool { use InferenceTypePart as ITP; match self { ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 | ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true, _ => false, } } fn is_concrete_arraylike(&self) -> bool { use InferenceTypePart as ITP; match self { ITP::Array | ITP::Slice | ITP::String | ITP::Message => true, _ => false, } } fn is_concrete_port(&self) -> bool { use InferenceTypePart as ITP; match self { ITP::Input | ITP::Output => true, _ => false, } } /// Checks if a part is less specific than the argument. Only checks for /// single-part inference (i.e. not the replacement of an `Unknown` variant /// with the argument) fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool { use InferenceTypePart as ITP; (*self == ITP::IntegerLike && arg.is_concrete_integer()) || (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) || (*self == ITP::ArrayLike && arg.is_concrete_arraylike()) || (*self == ITP::PortLike && arg.is_concrete_port()) } /// Checks if a part is more specific /// Returns the change in "iteration depth" when traversing this particular /// part. The iteration depth is used to traverse the tree in a linear /// fashion. It is basically `number_of_subtypes - 1` fn depth_change(&self) -> i32 { use InferenceTypePart as ITP; match &self { ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::Void | ITP::Bool | ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 | ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 | ITP::Character => { -1 }, ITP::Marker(_) | ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice | ITP::PortLike | ITP::Input | ITP::Output | ITP::String => { // One subtype, so do not modify depth 0 }, ITP::Tuple(num) | ITP::Instance(_, num) => { (*num as i32) - 1 } } } } #[derive(Debug, Clone)] struct InferenceType { has_marker: bool, is_done: bool, parts: Vec, } impl InferenceType { /// Generates a new InferenceType. The two boolean flags will be checked in /// debug mode. fn new(has_marker: bool, is_done: bool, parts: Vec) -> Self { dbg_code!({ debug_assert!(!parts.is_empty()); let parts_body_marker = parts.iter().any(|v| v.is_marker()); debug_assert_eq!(has_marker, parts_body_marker); let parts_done = parts.iter().all(|v| v.is_concrete()); debug_assert_eq!(is_done, parts_done, "{:?}", parts); }); Self{ has_marker, is_done, parts } } /// Replaces a type subtree with the provided subtree. The caller must make /// sure the the replacement is a well formed type subtree. fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) { let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx); debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0)); self.parts.splice(start_idx..end_idx, with.iter().cloned()); self.recompute_is_done(); } // TODO: @performance, might all be done inline in the type inference methods fn recompute_is_done(&mut self) { self.is_done = self.parts.iter().all(|v| v.is_concrete()); } /// Seeks a body marker starting at the specified position. If a marker is /// found then its value and the index of the type subtree that follows it /// is returned. fn find_marker(&self, mut start_idx: usize) -> Option<(u32, usize)> { while start_idx < self.parts.len() { if let InferenceTypePart::Marker(marker) = &self.parts[start_idx] { return Some((*marker, start_idx + 1)) } start_idx += 1; } None } /// Returns an iterator over all body markers and the partial type tree that /// follows those markers. If it is a problem that `InferenceType` is /// borrowed by the iterator, then use `find_body_marker`. fn marker_iter(&self) -> InferenceTypeMarkerIter { InferenceTypeMarkerIter::new(&self.parts) } /// Given that the `parts` are a depth-first serialized tree of types, this /// function finds the subtree anchored at a specific node. The returned /// index is exclusive. fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize { let mut depth = 1; let mut idx = start_idx; while idx < parts.len() { depth += parts[idx].depth_change(); if depth == 0 { return idx + 1; } idx += 1; } // If here, then the inference type is malformed unreachable!("Malformed type: {:?}", parts); } /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` /// using the subtree at `template.parts[template_idx]`. Will return /// `Some(depth_change_due_to_traversal)` if type inference has been /// applied. In this case the indices will also be modified to point to the /// next part in both templates. If type inference has not (or: could not) /// be applied then `None` will be returned. Note that this might mean that /// the types are incompatible. /// /// As this is a helper functions, some assumptions: the parts are not /// exactly equal, and neither of them contains a marker. Also: only the /// `to_infer` parts are checked for inference. It might be that this /// function returns `None`, but that that `template` is still compatible /// with `to_infer`, e.g. when `template` has an `Unknown` part. fn infer_part_for_single_type( to_infer: &mut InferenceType, to_infer_idx: &mut usize, template_parts: &[InferenceTypePart], template_idx: &mut usize, ) -> Option { use InferenceTypePart as ITP; let to_infer_part = &to_infer.parts[*to_infer_idx]; let template_part = &template_parts[*template_idx]; // Check for programmer mistakes debug_assert_ne!(to_infer_part, template_part); debug_assert!(!to_infer_part.is_marker(), "marker encountered in 'infer part'"); debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'"); // Inference of a somewhat-specified type if to_infer_part.may_be_inferred_from(template_part) { let depth_change = to_infer_part.depth_change(); debug_assert_eq!(depth_change, template_part.depth_change()); to_infer.parts[*to_infer_idx] = template_part.clone(); *to_infer_idx += 1; *template_idx += 1; return Some(depth_change); } // Inference of a completely unknown type if *to_infer_part == ITP::Unknown { // template part is different, so cannot be unknown, hence copy the // entire subtree. Make sure not to copy markers. let template_end_idx = Self::find_subtree_end_idx(template_parts, *template_idx); to_infer.parts[*to_infer_idx] = template_parts[*template_idx].clone(); // first element *to_infer_idx += 1; for template_idx in *template_idx + 1..template_end_idx { let template_part = &template_parts[template_idx]; if !template_part.is_marker() { to_infer.parts.insert(*to_infer_idx, template_part.clone()); *to_infer_idx += 1; } } *template_idx = template_end_idx; // Note: by definition the LHS was Unknown and the RHS traversed a // full subtree. return Some(-1); } None } /// Call that checks if the `to_check` part is compatible with the `infer` /// part. This is essentially a copy of `infer_part_for_single_type`, but /// without actually copying the type parts. fn check_part_for_single_type( to_check_parts: &[InferenceTypePart], to_check_idx: &mut usize, template_parts: &[InferenceTypePart], template_idx: &mut usize ) -> Option { use InferenceTypePart as ITP; let to_check_part = &to_check_parts[*to_check_idx]; let template_part = &template_parts[*template_idx]; // Checking programmer errors debug_assert_ne!(to_check_part, template_part); debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'"); debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'"); if to_check_part.may_be_inferred_from(template_part) { let depth_change = to_check_part.depth_change(); debug_assert_eq!(depth_change, template_part.depth_change()); *to_check_idx += 1; *template_idx += 1; return Some(depth_change); } if *to_check_part == ITP::Unknown { *to_check_idx += 1; *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx); // By definition LHS and RHS had depth change of -1 return Some(-1); } None } /// Attempts to infer types between two `InferenceType` instances. This /// function is unsafe as it accepts pointers to work around Rust's /// borrowing rules. The caller must ensure that the pointers are distinct. unsafe fn infer_subtrees_for_both_types( type_a: *mut InferenceType, start_idx_a: usize, type_b: *mut InferenceType, start_idx_b: usize ) -> DualInferenceResult { debug_assert!(!std::ptr::eq(type_a, type_b), "encountered pointers to the same inference type"); let type_a = &mut *type_a; let type_b = &mut *type_b; let mut modified_a = false; let mut modified_b = false; let mut idx_a = start_idx_a; let mut idx_b = start_idx_b; let mut depth = 1; while depth > 0 { // Advance indices if we encounter markers or equal parts let part_a = &type_a.parts[idx_a]; let part_b = &type_b.parts[idx_b]; if part_a == part_b { let depth_change = part_a.depth_change(); depth += depth_change; debug_assert_eq!(depth_change, part_b.depth_change()); idx_a += 1; idx_b += 1; continue; } if part_a.is_marker() { idx_a += 1; continue; } if part_b.is_marker() { idx_b += 1; continue; } // Types are not equal and are both not markers if let Some(depth_change) = Self::infer_part_for_single_type(type_a, &mut idx_a, &type_b.parts, &mut idx_b) { depth += depth_change; modified_a = true; continue; } if let Some(depth_change) = Self::infer_part_for_single_type(type_b, &mut idx_b, &type_a.parts, &mut idx_a) { depth += depth_change; modified_b = true; continue; } // Types can not be inferred in any way: types must be incompatible return DualInferenceResult::Incompatible; } if modified_a { type_a.recompute_is_done(); } if modified_b { type_b.recompute_is_done(); } // If here then we completely inferred the subtrees. match (modified_a, modified_b) { (false, false) => DualInferenceResult::Neither, (false, true) => DualInferenceResult::Second, (true, false) => DualInferenceResult::First, (true, true) => DualInferenceResult::Both } } /// Attempts to infer the first subtree based on the template. Like /// `infer_subtrees_for_both_types`, but now only applying inference to /// `to_infer` based on the type information in `template`. /// /// The `forced_template` flag controls whether `to_infer` is considered /// valid if it is more specific then the template. When `forced_template` /// is false, then as long as the `to_infer` and `template` types are /// compatible the inference will succeed. If `forced_template` is true, /// then `to_infer` MUST be less specific than `template` (e.g. /// `IntegerLike` is less specific than `UInt32`) fn infer_subtree_for_single_type( to_infer: &mut InferenceType, mut to_infer_idx: usize, template: &[InferenceTypePart], mut template_idx: usize, forced_template: bool, ) -> SingleInferenceResult { let mut modified = false; let mut depth = 1; while depth > 0 { let to_infer_part = &to_infer.parts[to_infer_idx]; let template_part = &template[template_idx]; if to_infer_part == template_part { let depth_change = to_infer_part.depth_change(); depth += depth_change; debug_assert_eq!(depth_change, template_part.depth_change()); to_infer_idx += 1; template_idx += 1; continue; } if to_infer_part.is_marker() { to_infer_idx += 1; continue; } if template_part.is_marker() { template_idx += 1; continue; } // Types are not equal and not markers. So check if we can infer // anything if let Some(depth_change) = Self::infer_part_for_single_type( to_infer, &mut to_infer_idx, template, &mut template_idx ) { depth += depth_change; modified = true; continue; } if !forced_template { // We cannot infer anything, but the template may still be // compatible with the type we're inferring if let Some(depth_change) = Self::check_part_for_single_type( template, &mut template_idx, &to_infer.parts, &mut to_infer_idx ) { depth += depth_change; continue; } } return SingleInferenceResult::Incompatible } if modified { to_infer.recompute_is_done(); return SingleInferenceResult::Modified; } else { return SingleInferenceResult::Unmodified; } } /// Checks if both types are compatible, doesn't perform any inference fn check_subtrees( type_parts_a: &[InferenceTypePart], start_idx_a: usize, type_parts_b: &[InferenceTypePart], start_idx_b: usize ) -> bool { let mut depth = 1; let mut idx_a = start_idx_a; let mut idx_b = start_idx_b; while depth > 0 { let part_a = &type_parts_a[idx_a]; let part_b = &type_parts_b[idx_b]; if part_a == part_b { let depth_change = part_a.depth_change(); depth += depth_change; debug_assert_eq!(depth_change, part_b.depth_change()); idx_a += 1; idx_b += 1; continue; } if part_a.is_marker() { idx_a += 1; continue; } if part_b.is_marker() { idx_b += 1; continue; } if let Some(depth_change) = Self::check_part_for_single_type( type_parts_a, &mut idx_a, type_parts_b, &mut idx_b ) { depth += depth_change; continue; } if let Some(depth_change) = Self::check_part_for_single_type( type_parts_b, &mut idx_b, type_parts_a, &mut idx_a ) { depth += depth_change; continue; } return false; } true } /// Performs the conversion of the inference type into a concrete type. /// By calling this function you must make sure that no unspecified types /// (e.g. Unknown or IntegerLike) exist in the type. Will not clear or check /// if the supplied `ConcreteType` is empty, will simply append to the parts /// vector. fn write_concrete_type(&self, concrete_type: &mut ConcreteType) { use InferenceTypePart as ITP; use ConcreteTypePart as CTP; // Make sure inference type is specified but concrete type is not yet specified debug_assert!(!self.parts.is_empty()); concrete_type.parts.reserve(self.parts.len()); let mut idx = 0; while idx < self.parts.len() { let part = &self.parts[idx]; let converted_part = match part { ITP::Marker(_) => { // Markers are removed when writing to the concrete type. idx += 1; continue; }, ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => { // Should not happen if type inferencing works correctly: we // should have returned a programmer-readable error or have // inferred all types. unreachable!("attempted to convert inference type part {:?} into concrete type", part); }, ITP::Void => CTP::Void, ITP::Message => CTP::Message, ITP::Bool => CTP::Bool, ITP::UInt8 => CTP::UInt8, ITP::UInt16 => CTP::UInt16, ITP::UInt32 => CTP::UInt32, ITP::UInt64 => CTP::UInt64, ITP::SInt8 => CTP::SInt8, ITP::SInt16 => CTP::SInt16, ITP::SInt32 => CTP::SInt32, ITP::SInt64 => CTP::SInt64, ITP::Character => CTP::Character, ITP::String => { // Inferred type has a 'char' subtype to simplify array // checking, we remove it here. debug_assert_eq!(self.parts[idx + 1], InferenceTypePart::Character); idx += 1; CTP::String }, ITP::Array => CTP::Array, ITP::Slice => CTP::Slice, ITP::Input => CTP::Input, ITP::Output => CTP::Output, ITP::Tuple(num) => CTP::Tuple(*num), ITP::Instance(id, num) => CTP::Instance(*id, *num), }; concrete_type.parts.push(converted_part); idx += 1; } } /// Writes a human-readable version of the type to a string. This is used /// to display error messages fn write_display_name( buffer: &mut String, heap: &Heap, parts: &[InferenceTypePart], mut idx: usize ) -> usize { use InferenceTypePart as ITP; match &parts[idx] { ITP::Marker(_marker_idx) => { if debug_log_enabled!() { buffer.push_str(&format!("{{Marker:{}}}", *_marker_idx)); } idx = Self::write_display_name(buffer, heap, parts, idx + 1); }, ITP::Unknown => buffer.push_str("?"), ITP::NumberLike => buffer.push_str("numberlike"), ITP::IntegerLike => buffer.push_str("integerlike"), ITP::ArrayLike => { idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push_str("[?]"); }, ITP::PortLike => { buffer.push_str("portlike<"); idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push('>'); } ITP::Void => buffer.push_str("void"), ITP::Bool => buffer.push_str(KW_TYPE_BOOL_STR), ITP::UInt8 => buffer.push_str(KW_TYPE_UINT8_STR), ITP::UInt16 => buffer.push_str(KW_TYPE_UINT16_STR), ITP::UInt32 => buffer.push_str(KW_TYPE_UINT32_STR), ITP::UInt64 => buffer.push_str(KW_TYPE_UINT64_STR), ITP::SInt8 => buffer.push_str(KW_TYPE_SINT8_STR), ITP::SInt16 => buffer.push_str(KW_TYPE_SINT16_STR), ITP::SInt32 => buffer.push_str(KW_TYPE_SINT32_STR), ITP::SInt64 => buffer.push_str(KW_TYPE_SINT64_STR), ITP::Character => buffer.push_str(KW_TYPE_CHAR_STR), ITP::String => { buffer.push_str(KW_TYPE_STRING_STR); idx += 1; // skip the 'char' subtype }, ITP::Message => { buffer.push_str(KW_TYPE_MESSAGE_STR); buffer.push('<'); idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push('>'); }, ITP::Array => { idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push_str("[]"); }, ITP::Slice => { idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push_str("[..]"); }, ITP::Input => { buffer.push_str(KW_TYPE_IN_PORT_STR); buffer.push('<'); idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push('>'); }, ITP::Output => { buffer.push_str(KW_TYPE_OUT_PORT_STR); buffer.push('<'); idx = Self::write_display_name(buffer, heap, parts, idx + 1); buffer.push('>'); }, ITP::Tuple(num_sub) => { buffer.push('('); if *num_sub > 0 { idx = Self::write_display_name(buffer, heap, parts, idx + 1); for _sub_idx in 1..*num_sub { buffer.push_str(", "); idx = Self::write_display_name(buffer, heap, parts, idx + 1); } } buffer.push(')'); } ITP::Instance(definition_id, num_sub) => { let definition = &heap[*definition_id]; buffer.push_str(definition.identifier().value.as_str()); if *num_sub > 0 { buffer.push('<'); idx = Self::write_display_name(buffer, heap, parts, idx + 1); for _sub_idx in 1..*num_sub { buffer.push_str(", "); idx = Self::write_display_name(buffer, heap, parts, idx + 1); } buffer.push('>'); } }, } idx } /// Returns the display name of a (part of) the type tree. Will allocate a /// string. fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String { let mut buffer = String::with_capacity(parts.len() * 6); Self::write_display_name(&mut buffer, heap, parts, 0); buffer } /// Returns the display name of the full type tree. Will allocate a string. fn display_name(&self, heap: &Heap) -> String { Self::partial_display_name(heap, &self.parts) } } impl Default for InferenceType { fn default() -> Self { Self{ has_marker: false, is_done: false, parts: Vec::new(), } } } /// Iterator over the subtrees that follow a marker in an `InferenceType` /// instance. Returns immutable slices over the internal parts struct InferenceTypeMarkerIter<'a> { parts: &'a [InferenceTypePart], idx: usize, } impl<'a> InferenceTypeMarkerIter<'a> { fn new(parts: &'a [InferenceTypePart]) -> Self { Self{ parts, idx: 0 } } } impl<'a> Iterator for InferenceTypeMarkerIter<'a> { type Item = (u32, &'a [InferenceTypePart]); fn next(&mut self) -> Option { // Iterate until we find a marker while self.idx < self.parts.len() { if let InferenceTypePart::Marker(marker) = self.parts[self.idx] { // Found a marker, find the subtree end let start_idx = self.idx + 1; let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx); // Modify internal index, then return items self.idx = end_idx; return Some((marker, &self.parts[start_idx..end_idx])); } self.idx += 1; } None } } #[derive(Debug, PartialEq, Eq)] enum DualInferenceResult { Neither, // neither argument is clarified First, // first argument is clarified using the second one Second, // second argument is clarified using the first one Both, // both arguments are clarified Incompatible, // types are incompatible: programmer error } impl DualInferenceResult { fn modified_lhs(&self) -> bool { match self { DualInferenceResult::First | DualInferenceResult::Both => true, _ => false } } fn modified_rhs(&self) -> bool { match self { DualInferenceResult::Second | DualInferenceResult::Both => true, _ => false } } } #[derive(Debug, PartialEq, Eq)] enum SingleInferenceResult { Unmodified, Modified, Incompatible } // ----------------------------------------------------------------------------- // PassTyping - Public Interface // ----------------------------------------------------------------------------- type InferNodeIndex = usize; type PolyDataIndex = isize; type VarDataIndex = usize; pub(crate) struct ResolveQueueElement { // Note that using the `definition_id` and the `monomorph_idx` one may // query the type table for the full procedure type, thereby retrieving // the polymorphic arguments to the procedure. pub(crate) root_id: RootId, pub(crate) definition_id: DefinitionId, pub(crate) reserved_type_id: TypeId, pub(crate) reserved_monomorph_index: u32, } pub(crate) type ResolveQueue = Vec; struct InferenceNode { // filled in during type inference expr_type: InferenceType, // result type from expression expr_id: ExpressionId, // expression that is evaluated inference_rule: InferenceRule, // rule used to infer node type parent_index: Option, // parent of inference node field_index: i32, // index of struct field or tuple member poly_data_index: PolyDataIndex, // index to inference data for polymorphic types // filled in once type inference is done info_type_id: TypeId, info_variant: ExpressionInfoVariant, } impl InferenceNode { #[inline] fn as_expression_info(&self) -> ExpressionInfo { return ExpressionInfo { type_id: self.info_type_id, variant: self.info_variant } } } /// Inferencing rule to apply. Some of these are reasonably generic. Other ones /// require so much custom logic that we'll not try to come up with an /// abstraction. enum InferenceRule { Noop, MonoTemplate(InferenceRuleTemplate), BiEqual(InferenceRuleBiEqual), TriEqualArgs(InferenceRuleTriEqualArgs), TriEqualAll(InferenceRuleTriEqualAll), Concatenate(InferenceRuleTwoArgs), IndexingExpr(InferenceRuleIndexingExpr), SlicingExpr(InferenceRuleSlicingExpr), SelectStructField(InferenceRuleSelectStructField), SelectTupleMember(InferenceRuleSelectTupleMember), LiteralStruct(InferenceRuleLiteralStruct), LiteralEnum, LiteralUnion(InferenceRuleLiteralUnion), LiteralArray(InferenceRuleLiteralArray), LiteralTuple(InferenceRuleLiteralTuple), CastExpr(InferenceRuleCastExpr), CallExpr(InferenceRuleCallExpr), VariableExpr(InferenceRuleVariableExpr), } impl InferenceRule { union_cast_method_impl!(as_mono_template, InferenceRuleTemplate, InferenceRule::MonoTemplate); union_cast_method_impl!(as_bi_equal, InferenceRuleBiEqual, InferenceRule::BiEqual); union_cast_method_impl!(as_tri_equal_args, InferenceRuleTriEqualArgs, InferenceRule::TriEqualArgs); union_cast_method_impl!(as_tri_equal_all, InferenceRuleTriEqualAll, InferenceRule::TriEqualAll); union_cast_method_impl!(as_concatenate, InferenceRuleTwoArgs, InferenceRule::Concatenate); union_cast_method_impl!(as_indexing_expr, InferenceRuleIndexingExpr, InferenceRule::IndexingExpr); union_cast_method_impl!(as_slicing_expr, InferenceRuleSlicingExpr, InferenceRule::SlicingExpr); union_cast_method_impl!(as_select_struct_field, InferenceRuleSelectStructField, InferenceRule::SelectStructField); union_cast_method_impl!(as_select_tuple_member, InferenceRuleSelectTupleMember, InferenceRule::SelectTupleMember); union_cast_method_impl!(as_literal_struct, InferenceRuleLiteralStruct, InferenceRule::LiteralStruct); union_cast_method_impl!(as_literal_union, InferenceRuleLiteralUnion, InferenceRule::LiteralUnion); union_cast_method_impl!(as_literal_array, InferenceRuleLiteralArray, InferenceRule::LiteralArray); union_cast_method_impl!(as_literal_tuple, InferenceRuleLiteralTuple, InferenceRule::LiteralTuple); union_cast_method_impl!(as_cast_expr, InferenceRuleCastExpr, InferenceRule::CastExpr); union_cast_method_impl!(as_call_expr, InferenceRuleCallExpr, InferenceRule::CallExpr); union_cast_method_impl!(as_variable_expr, InferenceRuleVariableExpr, InferenceRule::VariableExpr); } // Note: InferenceRuleTemplate is `Copy`, so don't add dynamically allocated // members in the future (or review places where this struct is copied) #[derive(Clone, Copy)] struct InferenceRuleTemplate { template: &'static [InferenceTypePart], application: InferenceRuleTemplateApplication, } impl InferenceRuleTemplate { fn new_none() -> Self { return Self{ template: &[], application: InferenceRuleTemplateApplication::None, }; } fn new_forced(template: &'static [InferenceTypePart]) -> Self { return Self{ template, application: InferenceRuleTemplateApplication::Forced, }; } fn new_template(template: &'static [InferenceTypePart]) -> Self { return Self{ template, application: InferenceRuleTemplateApplication::Template, } } } #[derive(Clone, Copy)] enum InferenceRuleTemplateApplication { None, // do not apply template, silly, but saves some bytes Forced, Template, } /// Type equality applied to 'self' and the argument. An optional template will /// be applied to 'self' first. Example: "bitwise not" struct InferenceRuleBiEqual { template: InferenceRuleTemplate, argument_index: InferNodeIndex, } /// Type equality applied to two arguments. Template can be applied to 'self' /// (generally forced, since this rule does not apply a type equality constraint /// to 'self') and the two arguments. Example: "equality operator" struct InferenceRuleTriEqualArgs { argument_template: InferenceRuleTemplate, result_template: InferenceRuleTemplate, argument1_index: InferNodeIndex, argument2_index: InferNodeIndex, } /// Type equality applied to 'self' and two arguments. Template may be /// optionally applied to 'self'. Example: "addition operator" struct InferenceRuleTriEqualAll { template: InferenceRuleTemplate, argument1_index: InferNodeIndex, argument2_index: InferNodeIndex, } /// Information for an inference rule that is applied to 'self' and two /// arguments, see `InferenceRule` for its meaning. struct InferenceRuleTwoArgs { argument1_index: InferNodeIndex, argument2_index: InferNodeIndex, } struct InferenceRuleIndexingExpr { subject_index: InferNodeIndex, index_index: InferNodeIndex, } struct InferenceRuleSlicingExpr { subject_index: InferNodeIndex, from_index: InferNodeIndex, to_index: InferNodeIndex, } struct InferenceRuleSelectStructField { subject_index: InferNodeIndex, selected_field: Identifier, } struct InferenceRuleSelectTupleMember { subject_index: InferNodeIndex, selected_index: u64, } struct InferenceRuleLiteralStruct { element_indices: Vec, } struct InferenceRuleLiteralUnion { element_indices: Vec } struct InferenceRuleLiteralArray { element_indices: Vec } struct InferenceRuleLiteralTuple { element_indices: Vec } struct InferenceRuleCastExpr { subject_index: InferNodeIndex, } struct InferenceRuleCallExpr { argument_indices: Vec } /// Data associated with a variable expression: an expression that reads the /// value from a variable. struct InferenceRuleVariableExpr { var_data_index: VarDataIndex, // shared variable information } /// This particular visitor will recurse depth-first into the AST and ensures /// that all expressions have the appropriate types. pub(crate) struct PassTyping { // Current definition we're typechecking. reserved_type_id: TypeId, reserved_monomorph_index: u32, procedure_id: ProcedureDefinitionId, procedure_kind: ProcedureKind, poly_vars: Vec, // Temporary variables during construction of inference rulesr parent_index: Option, // Buffers for iteration over various types var_buffer: ScopedBuffer, expr_buffer: ScopedBuffer, stmt_buffer: ScopedBuffer, bool_buffer: ScopedBuffer, index_buffer: ScopedBuffer, poly_progress_buffer: ScopedBuffer, // Mapping from parser type to inferred type. We attempt to continue to // specify these types until we're stuck or we've fully determined the type. infer_nodes: Vec, // will be transferred to type table at end poly_data: Vec, // data for polymorph inference var_data: Vec, // Keeping track of which expressions need to be reinferred because the // expressions they're linked to made progression on an associated type node_queued: DequeSet, } /// Generic struct that is used to store inferred types associated with /// polymorphic types. struct PolyData { first_rule_application: bool, definition_id: DefinitionId, // the definition, only used for user feedback /// Inferred types of the polymorphic variables as they are written down /// at the type's definition. poly_vars: Vec, expr_types: PolyDataTypes, } // silly structure, just so we can use `PolyDataTypeIndex` ergonomically while // making sure we're still capable of borrowing from `poly_vars`. struct PolyDataTypes { /// Inferred types of associated types (e.g. struct fields, tuple members, /// function arguments). These types may depend on the polymorphic variables /// defined above. associated: Vec, /// Inferred "returned" type (e.g. if a struct field is selected, then this /// contains the type of the selected field, for a function call it contains /// the return type). May depend on the polymorphic variables defined above. returned: InferenceType, } #[derive(Clone, Copy)] enum PolyDataTypeIndex { Associated(usize), // indexes into `PolyData.associated` Returned, } impl PolyDataTypes { fn get_type(&self, index: PolyDataTypeIndex) -> &InferenceType { match index { PolyDataTypeIndex::Associated(index) => return &self.associated[index], PolyDataTypeIndex::Returned => return &self.returned, } } fn get_type_mut(&mut self, index: PolyDataTypeIndex) -> &mut InferenceType { match index { PolyDataTypeIndex::Associated(index) => return &mut self.associated[index], PolyDataTypeIndex::Returned => return &mut self.returned, } } } struct VarData { var_id: VariableId, var_type: InferenceType, used_at: Vec, // of variable expressions linked_var: Option, } impl PassTyping { pub(crate) fn new() -> Self { PassTyping { reserved_type_id: TypeId::new_invalid(), reserved_monomorph_index: u32::MAX, procedure_id: ProcedureDefinitionId::new_invalid(), procedure_kind: ProcedureKind::Function, poly_vars: Vec::new(), parent_index: None, var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE), expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE), stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_LARGE), bool_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL), index_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL), poly_progress_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAP_SMALL), infer_nodes: Vec::with_capacity(BUFFER_INIT_CAP_LARGE), poly_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL), var_data: Vec::with_capacity(BUFFER_INIT_CAP_SMALL), node_queued: DequeSet::new(), } } pub(crate) fn queue_module_definitions(ctx: &mut Ctx, queue: &mut ResolveQueue) { debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::ValidatedAndLinked); let root_id = ctx.module().root_id; let root = &ctx.heap.protocol_descriptions[root_id]; for definition_id in &root.definitions { let definition = &ctx.heap[*definition_id]; let first_concrete_part_and_procedure_id = match definition { Definition::Procedure(definition) => { if definition.poly_vars.is_empty() { if definition.kind == ProcedureKind::Function { Some((ConcreteTypePart::Function(definition.this, 0), definition.this)) } else { Some((ConcreteTypePart::Component(definition.this, 0), definition.this)) } } else { None } } Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => None, }; if let Some((first_concrete_part, procedure_id)) = first_concrete_part_and_procedure_id { let procedure = &mut ctx.heap[procedure_id]; let monomorph_index = procedure.monomorphs.len() as u32; procedure.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid()); let concrete_type = ConcreteType{ parts: vec![first_concrete_part] }; let type_id = ctx.types.reserve_procedure_monomorph_type_id(definition_id, concrete_type, monomorph_index); queue.push(ResolveQueueElement{ root_id, definition_id: *definition_id, reserved_type_id: type_id, reserved_monomorph_index: monomorph_index, }) } } } pub(crate) fn handle_module_definition( &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement ) -> VisitorResult { self.reset(); debug_assert_eq!(ctx.module().root_id, element.root_id); debug_assert!(self.poly_vars.is_empty()); // Prepare for visiting the definition self.reserved_type_id = element.reserved_type_id; self.reserved_monomorph_index = element.reserved_monomorph_index; let proc_base = ctx.types.get_base_definition(&element.definition_id).unwrap(); if proc_base.is_polymorph { let monomorph = ctx.types.get_monomorph(element.reserved_type_id); for poly_arg in monomorph.concrete_type.embedded_iter(0) { self.poly_vars.push(ConcreteType{ parts: Vec::from(poly_arg) }); } } // Visit the definition, setting up the type resolving process, then // (attempt to) resolve all types self.visit_definition(ctx, element.definition_id)?; self.resolve_types(ctx, queue)?; Ok(()) } fn reset(&mut self) { self.reserved_type_id = TypeId::new_invalid(); self.procedure_id = ProcedureDefinitionId::new_invalid(); self.procedure_kind = ProcedureKind::Function; self.poly_vars.clear(); self.parent_index = None; self.infer_nodes.clear(); self.poly_data.clear(); self.var_data.clear(); self.node_queued.clear(); } } // ----------------------------------------------------------------------------- // PassTyping - Visitor-like implementation // ----------------------------------------------------------------------------- type VisitorResult = Result<(), ParseError>; type VisitExprResult = Result; impl PassTyping { // Definitions fn visit_definition(&mut self, ctx: &mut Ctx, id: DefinitionId) -> VisitorResult { return visitor_recursive_definition_impl!(self, &ctx.heap[id], ctx); } fn visit_enum_definition(&mut self, _: &mut Ctx, _: EnumDefinitionId) -> VisitorResult { return Ok(()) } fn visit_struct_definition(&mut self, _: &mut Ctx, _: StructDefinitionId) -> VisitorResult { return Ok(()) } fn visit_union_definition(&mut self, _: &mut Ctx, _: UnionDefinitionId) -> VisitorResult { return Ok(()) } fn visit_procedure_definition(&mut self, ctx: &mut Ctx, id: ProcedureDefinitionId) -> VisitorResult { let procedure_def = &ctx.heap[id]; self.procedure_id = id; self.procedure_kind = procedure_def.kind; let body_id = procedure_def.body; debug_log!("{}", "-".repeat(50)); debug_log!("Visiting procedure: '{}' (id: {}, kind: {:?})", procedure_def.identifier.value.as_str(), id.0.index, procedure_def.kind); debug_log!("{}", "-".repeat(50)); // Visit parameters let section = self.var_buffer.start_section_initialized(procedure_def.parameters.as_slice()); for param_id in section.iter_copied() { let param = &ctx.heap[param_id]; let var_type = self.determine_inference_type_from_parser_type_elements(¶m.parser_type.elements, true); debug_assert!(var_type.is_done, "expected function arguments to be concrete types"); self.var_data.push(VarData{ var_id: param_id, var_type, used_at: Vec::new(), linked_var: None }) } section.forget(); // Visit all of the expressions within the body self.parent_index = None; return self.visit_block_stmt(ctx, body_id); } // Statements fn visit_stmt(&mut self, ctx: &mut Ctx, id: StatementId) -> VisitorResult { return visitor_recursive_statement_impl!(self, &ctx.heap[id], ctx, Ok(())); } fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult { // Transfer statements for traversal let block = &ctx.heap[id]; let section = self.stmt_buffer.start_section_initialized(block.statements.as_slice()); for stmt_id in section.iter_copied() { self.visit_stmt(ctx, stmt_id)?; } section.forget(); Ok(()) } fn visit_local_stmt(&mut self, ctx: &mut Ctx, id: LocalStatementId) -> VisitorResult { return visitor_recursive_local_impl!(self, &ctx.heap[id], ctx); } fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult { let memory_stmt = &ctx.heap[id]; let initial_expr_id = memory_stmt.initial_expr; let local = &ctx.heap[memory_stmt.variable]; let var_type = self.determine_inference_type_from_parser_type_elements(&local.parser_type.elements, true); self.var_data.push(VarData{ var_id: memory_stmt.variable, var_type, used_at: Vec::new(), linked_var: None, }); // Process the initial value self.visit_assignment_expr(ctx, initial_expr_id)?; Ok(()) } fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult { let channel_stmt = &ctx.heap[id]; let from_var_index = self.var_data.len() as VarDataIndex; let to_var_index = from_var_index + 1; let from_local = &ctx.heap[channel_stmt.from]; let from_var_type = self.determine_inference_type_from_parser_type_elements(&from_local.parser_type.elements, true); self.var_data.push(VarData{ var_id: channel_stmt.from, var_type: from_var_type, used_at: Vec::new(), linked_var: Some(to_var_index), }); let to_local = &ctx.heap[channel_stmt.to]; let to_var_type = self.determine_inference_type_from_parser_type_elements(&to_local.parser_type.elements, true); self.var_data.push(VarData{ var_id: channel_stmt.to, var_type: to_var_type, used_at: Vec::new(), linked_var: Some(from_var_index), }); Ok(()) } fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult { let labeled_stmt = &ctx.heap[id]; let substmt_id = labeled_stmt.body; self.visit_stmt(ctx, substmt_id) } fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult { let if_stmt = &ctx.heap[id]; let true_body_case = if_stmt.true_case; let false_body_case = if_stmt.false_case; let test_expr_id = if_stmt.test; self.visit_expr(ctx, test_expr_id)?; self.visit_stmt(ctx, true_body_case.body)?; if let Some(false_body_case) = false_body_case { self.visit_stmt(ctx, false_body_case.body)?; } Ok(()) } fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult { let while_stmt = &ctx.heap[id]; let body_id = while_stmt.body; let test_expr_id = while_stmt.test; self.visit_expr(ctx, test_expr_id)?; self.visit_stmt(ctx, body_id)?; Ok(()) } fn visit_break_stmt(&mut self, _: &mut Ctx, _: BreakStatementId) -> VisitorResult { return Ok(()) } fn visit_continue_stmt(&mut self, _: &mut Ctx, _: ContinueStatementId) -> VisitorResult { return Ok(()) } fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult { let sync_stmt = &ctx.heap[id]; let body_id = sync_stmt.body; self.visit_stmt(ctx, body_id) } fn visit_fork_stmt(&mut self, ctx: &mut Ctx, id: ForkStatementId) -> VisitorResult { let fork_stmt = &ctx.heap[id]; let left_body_id = fork_stmt.left_body; let right_body_id = fork_stmt.right_body; self.visit_stmt(ctx, left_body_id)?; if let Some(right_body_id) = right_body_id { self.visit_stmt(ctx, right_body_id)?; } Ok(()) } fn visit_select_stmt(&mut self, ctx: &mut Ctx, id: SelectStatementId) -> VisitorResult { let select_stmt = &ctx.heap[id]; let mut section = self.stmt_buffer.start_section(); let num_cases = select_stmt.cases.len(); for case in &select_stmt.cases { section.push(case.guard); section.push(case.body); } for case_index in 0..num_cases { let base_index = 2 * case_index; let guard_stmt_id = section[base_index ]; let block_stmt_id = section[base_index + 1]; self.visit_stmt(ctx, guard_stmt_id)?; self.visit_stmt(ctx, block_stmt_id)?; } section.forget(); Ok(()) } fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult { let return_stmt = &ctx.heap[id]; debug_assert_eq!(return_stmt.expressions.len(), 1); let expr_id = return_stmt.expressions[0]; self.visit_expr(ctx, expr_id)?; return Ok(()); } fn visit_goto_stmt(&mut self, _: &mut Ctx, _: GotoStatementId) -> VisitorResult { return Ok(()) } fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult { let new_stmt = &ctx.heap[id]; let call_expr_id = new_stmt.expression; self.visit_call_expr(ctx, call_expr_id)?; return Ok(()); } fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult { let expr_stmt = &ctx.heap[id]; let subexpr_id = expr_stmt.expression; self.visit_expr(ctx, subexpr_id)?; return Ok(()); } // Expressions fn visit_expr(&mut self, ctx: &mut Ctx, id: ExpressionId) -> VisitExprResult { return visitor_recursive_expression_impl!(self, &ctx.heap[id], ctx); } fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitExprResult { use AssignmentOperator as AO; let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let assign_expr = &ctx.heap[id]; let assign_op = assign_expr.operation; let left_expr_id = assign_expr.left; let right_expr_id = assign_expr.right; let old_parent = self.parent_index.replace(self_index); let left_index = self.visit_expr(ctx, left_expr_id)?; let right_index = self.visit_expr(ctx, right_expr_id)?; let node = &mut self.infer_nodes[self_index]; let argument_template = match assign_op { AO::Set => InferenceRuleTemplate::new_none(), AO::Concatenated => InferenceRuleTemplate::new_template(&ARRAYLIKE_TEMPLATE), AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted => InferenceRuleTemplate::new_template(&NUMBERLIKE_TEMPLATE), AO::Remained | AO::ShiftedLeft | AO::ShiftedRight | AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred => InferenceRuleTemplate::new_template(&INTEGERLIKE_TEMPLATE), }; node.inference_rule = InferenceRule::TriEqualArgs(InferenceRuleTriEqualArgs{ argument_template, result_template: InferenceRuleTemplate::new_forced(&VOID_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }); self.parent_index = old_parent; self.progress_inference_rule_tri_equal_args(ctx, self_index)?; return Ok(self_index); } fn visit_binding_expr(&mut self, ctx: &mut Ctx, id: BindingExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let binding_expr = &ctx.heap[id]; let bound_to_id = binding_expr.bound_to; let bound_from_id = binding_expr.bound_from; let old_parent = self.parent_index.replace(self_index); let arg_to_index = self.visit_expr(ctx, bound_to_id)?; let arg_from_index = self.visit_expr(ctx, bound_from_id)?; let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::TriEqualArgs(InferenceRuleTriEqualArgs{ argument_template: InferenceRuleTemplate::new_none(), result_template: InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE), argument1_index: arg_to_index, argument2_index: arg_from_index, }); self.parent_index = old_parent; self.progress_inference_rule_tri_equal_args(ctx, self_index)?; return Ok(self_index); } fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let conditional_expr = &ctx.heap[id]; let test_expr_id = conditional_expr.test; let true_expr_id = conditional_expr.true_expression; let false_expr_id = conditional_expr.false_expression; let old_parent = self.parent_index.replace(self_index); self.visit_expr(ctx, test_expr_id)?; let true_index = self.visit_expr(ctx, true_expr_id)?; let false_index = self.visit_expr(ctx, false_expr_id)?; // Note: the test to the conditional expression has already been forced // to the boolean type. So the only thing we need to do while progressing // is to apply an equal3 constraint to the arguments and the result of // the expression. let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::TriEqualAll(InferenceRuleTriEqualAll{ template: InferenceRuleTemplate::new_none(), argument1_index: true_index, argument2_index: false_index, }); self.parent_index = old_parent; self.progress_inference_rule_tri_equal_all(ctx, self_index)?; return Ok(self_index); } fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitExprResult { use BinaryOperator as BO; let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let binary_expr = &ctx.heap[id]; let binary_op = binary_expr.operation; let lhs_expr_id = binary_expr.left; let rhs_expr_id = binary_expr.right; let old_parent = self.parent_index.replace(self_index); let left_index = self.visit_expr(ctx, lhs_expr_id)?; let right_index = self.visit_expr(ctx, rhs_expr_id)?; let inference_rule = match binary_op { BO::Concatenate => InferenceRule::Concatenate(InferenceRuleTwoArgs{ argument1_index: left_index, argument2_index: right_index, }), BO::LogicalAnd | BO::LogicalOr => InferenceRule::TriEqualAll(InferenceRuleTriEqualAll{ template: InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }), BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => InferenceRule::TriEqualAll(InferenceRuleTriEqualAll{ template: InferenceRuleTemplate::new_template(&INTEGERLIKE_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }), BO::Equality | BO::Inequality => InferenceRule::TriEqualArgs(InferenceRuleTriEqualArgs{ argument_template: InferenceRuleTemplate::new_none(), result_template: InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }), BO::LessThan | BO::GreaterThan | BO::LessThanEqual | BO::GreaterThanEqual => InferenceRule::TriEqualArgs(InferenceRuleTriEqualArgs{ argument_template: InferenceRuleTemplate::new_template(&NUMBERLIKE_TEMPLATE), result_template: InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }), BO::Add | BO::Subtract | BO::Multiply | BO::Divide => InferenceRule::TriEqualAll(InferenceRuleTriEqualAll{ template: InferenceRuleTemplate::new_template(&NUMBERLIKE_TEMPLATE), argument1_index: left_index, argument2_index: right_index, }), }; let node = &mut self.infer_nodes[self_index]; node.inference_rule = inference_rule; self.parent_index = old_parent; self.progress_inference_rule(ctx, self_index)?; return Ok(self_index); } fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitExprResult { use UnaryOperator as UO; let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let unary_expr = &ctx.heap[id]; let operation = unary_expr.operation; let arg_expr_id = unary_expr.expression; let old_parent = self.parent_index.replace(self_index); let argument_index = self.visit_expr(ctx, arg_expr_id)?; let template = match operation { UO::Positive | UO::Negative => InferenceRuleTemplate::new_template(&NUMBERLIKE_TEMPLATE), UO::BitwiseNot => InferenceRuleTemplate::new_template(&INTEGERLIKE_TEMPLATE), UO::LogicalNot => InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE), }; let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::BiEqual(InferenceRuleBiEqual{ template, argument_index, }); self.parent_index = old_parent; self.progress_inference_rule_bi_equal(ctx, self_index)?; return Ok(self_index); } fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let indexing_expr = &ctx.heap[id]; let subject_expr_id = indexing_expr.subject; let index_expr_id = indexing_expr.index; let old_parent = self.parent_index.replace(self_index); let subject_index = self.visit_expr(ctx, subject_expr_id)?; let index_index = self.visit_expr(ctx, index_expr_id)?; // cool name, bro let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::IndexingExpr(InferenceRuleIndexingExpr{ subject_index, index_index, }); self.parent_index = old_parent; self.progress_inference_rule_indexing_expr(ctx, self_index)?; return Ok(self_index); } fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let slicing_expr = &ctx.heap[id]; let subject_expr_id = slicing_expr.subject; let from_expr_id = slicing_expr.from_index; let to_expr_id = slicing_expr.to_index; let old_parent = self.parent_index.replace(self_index); let subject_index = self.visit_expr(ctx, subject_expr_id)?; let from_index = self.visit_expr(ctx, from_expr_id)?; let to_index = self.visit_expr(ctx, to_expr_id)?; let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::SlicingExpr(InferenceRuleSlicingExpr{ subject_index, from_index, to_index, }); self.parent_index = old_parent; self.progress_inference_rule_slicing_expr(ctx, self_index)?; return Ok(self_index); } fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let select_expr = &ctx.heap[id]; let subject_expr_id = select_expr.subject; let old_parent = self.parent_index.replace(self_index); let subject_index = self.visit_expr(ctx, subject_expr_id)?; let node = &mut self.infer_nodes[self_index]; let inference_rule = match &ctx.heap[id].kind { SelectKind::StructField(field_identifier) => InferenceRule::SelectStructField(InferenceRuleSelectStructField{ subject_index, selected_field: field_identifier.clone(), }), SelectKind::TupleMember(member_index) => InferenceRule::SelectTupleMember(InferenceRuleSelectTupleMember{ subject_index, selected_index: *member_index, }), }; node.inference_rule = inference_rule; self.parent_index = old_parent; self.progress_inference_rule(ctx, self_index)?; return Ok(self_index); } fn visit_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let old_parent = self.parent_index.replace(self_index); let literal_expr = &ctx.heap[id]; match &literal_expr.value { Literal::Null => { let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::MonoTemplate(InferenceRuleTemplate::new_template(&MESSAGE_TEMPLATE)); }, Literal::Integer(_) => { let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::MonoTemplate(InferenceRuleTemplate::new_template(&INTEGERLIKE_TEMPLATE)); }, Literal::True | Literal::False => { let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::MonoTemplate(InferenceRuleTemplate::new_forced(&BOOL_TEMPLATE)); }, Literal::Character(_) => { let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::MonoTemplate(InferenceRuleTemplate::new_forced(&CHARACTER_TEMPLATE)); }, Literal::String(_) => { let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::MonoTemplate(InferenceRuleTemplate::new_forced(&STRING_TEMPLATE)); }, Literal::Struct(literal) => { // Visit field expressions let mut expr_ids = self.expr_buffer.start_section(); for field in &literal.fields { expr_ids.push(field.value); } let mut expr_indices = self.index_buffer.start_section(); for expr_id in expr_ids.iter_copied() { let expr_index = self.visit_expr(ctx, expr_id)?; expr_indices.push(expr_index); } expr_ids.forget(); let element_indices = expr_indices.into_vec(); // Assign rule and extra data index to inference node let poly_data_index = self.insert_initial_struct_polymorph_data(ctx, id); let node = &mut self.infer_nodes[self_index]; node.poly_data_index = poly_data_index; node.inference_rule = InferenceRule::LiteralStruct(InferenceRuleLiteralStruct{ element_indices, }); }, Literal::Enum(_) => { // Enumerations do not carry any subexpressions, but may still // have a user-defined polymorphic marker variable. For this // reason we may still have to apply inference to this // polymorphic variable let poly_data_index = self.insert_initial_enum_polymorph_data(ctx, id); let node = &mut self.infer_nodes[self_index]; node.poly_data_index = poly_data_index; node.inference_rule = InferenceRule::LiteralEnum; }, Literal::Union(literal) => { // May carry subexpressions and polymorphic arguments let expr_ids = self.expr_buffer.start_section_initialized(literal.values.as_slice()); let poly_data_index = self.insert_initial_union_polymorph_data(ctx, id); let mut expr_indices = self.index_buffer.start_section(); for expr_id in expr_ids.iter_copied() { let expr_index = self.visit_expr(ctx, expr_id)?; expr_indices.push(expr_index); } expr_ids.forget(); let element_indices = expr_indices.into_vec(); let node = &mut self.infer_nodes[self_index]; node.poly_data_index = poly_data_index; node.inference_rule = InferenceRule::LiteralUnion(InferenceRuleLiteralUnion{ element_indices, }); }, Literal::Array(expressions) => { let expr_ids = self.expr_buffer.start_section_initialized(expressions.as_slice()); let mut expr_indices = self.index_buffer.start_section(); for expr_id in expr_ids.iter_copied() { let expr_index = self.visit_expr(ctx, expr_id)?; expr_indices.push(expr_index); } expr_ids.forget(); let element_indices = expr_indices.into_vec(); let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::LiteralArray(InferenceRuleLiteralArray{ element_indices, }); }, Literal::Tuple(expressions) => { let expr_ids = self.expr_buffer.start_section_initialized(expressions.as_slice()); let mut expr_indices = self.index_buffer.start_section(); for expr_id in expr_ids.iter_copied() { let expr_index = self.visit_expr(ctx, expr_id)?; expr_indices.push(expr_index); } expr_ids.forget(); let element_indices = expr_indices.into_vec(); let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::LiteralTuple(InferenceRuleLiteralTuple{ element_indices, }) } } self.parent_index = old_parent; self.progress_inference_rule(ctx, self_index)?; return Ok(self_index); } fn visit_cast_expr(&mut self, ctx: &mut Ctx, id: CastExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let cast_expr = &ctx.heap[id]; let subject_expr_id = cast_expr.subject; let old_parent = self.parent_index.replace(self_index); let subject_index = self.visit_expr(ctx, subject_expr_id)?; let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::CastExpr(InferenceRuleCastExpr{ subject_index, }); self.parent_index = old_parent; // The cast expression is a bit special at this point: the progression // function simply makes sure input/output types are compatible. But if // the programmer explicitly specified the output type, then we can // already perform that inference rule here. { let cast_expr = &ctx.heap[id]; let specified_type = self.determine_inference_type_from_parser_type_elements(&cast_expr.to_type.elements, true); let _progress = self.apply_template_constraint(ctx, self_index, &specified_type.parts)?; } self.progress_inference_rule_cast_expr(ctx, self_index)?; return Ok(self_index); } fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let extra_index = self.insert_initial_call_polymorph_data(ctx, id); // By default we set the polymorph idx for calls to 0. If the call // refers to a non-polymorphic function, then it will be "monomorphed" // once, hence we end up pointing to the correct instance. self.infer_nodes[self_index].field_index = 0; // Visit all arguments let old_parent = self.parent_index.replace(self_index); let call_expr = &ctx.heap[id]; let expr_ids = self.expr_buffer.start_section_initialized(call_expr.arguments.as_slice()); let mut expr_indices = self.index_buffer.start_section(); for arg_expr_id in expr_ids.iter_copied() { let expr_index = self.visit_expr(ctx, arg_expr_id)?; expr_indices.push(expr_index); } expr_ids.forget(); let argument_indices = expr_indices.into_vec(); let node = &mut self.infer_nodes[self_index]; node.poly_data_index = extra_index; node.inference_rule = InferenceRule::CallExpr(InferenceRuleCallExpr{ argument_indices, }); self.parent_index = old_parent; self.progress_inference_rule_call_expr(ctx, self_index)?; return Ok(self_index); } fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitExprResult { let upcast_id = id.upcast(); let self_index = self.insert_initial_inference_node(ctx, upcast_id)?; let var_expr = &ctx.heap[id]; debug_assert!(var_expr.declaration.is_some()); let old_parent = self.parent_index.replace(self_index); let declaration = &ctx.heap[var_expr.declaration.unwrap()]; let mut var_data_index = None; for (index, var_data) in self.var_data.iter().enumerate() { if var_data.var_id == declaration.this { var_data_index = Some(index); break; } } let var_data_index = if let Some(var_data_index) = var_data_index { let var_data = &mut self.var_data[var_data_index]; var_data.used_at.push(self_index); var_data_index } else { // If we're in a binding expression then it might the first time we // encounter the variable, so add a `VarData` entry. debug_assert_eq!(declaration.kind, VariableKind::Binding); let var_type = self.determine_inference_type_from_parser_type_elements( &declaration.parser_type.elements, true ); let var_data_index = self.var_data.len(); self.var_data.push(VarData{ var_id: declaration.this, var_type, used_at: vec![self_index], linked_var: None, }); var_data_index }; let node = &mut self.infer_nodes[self_index]; node.inference_rule = InferenceRule::VariableExpr(InferenceRuleVariableExpr{ var_data_index, }); self.parent_index = old_parent; self.progress_inference_rule_variable_expr(ctx, self_index)?; return Ok(self_index); } } // ----------------------------------------------------------------------------- // PassTyping - Type-inference progression // ----------------------------------------------------------------------------- impl PassTyping { #[allow(dead_code)] // used when debug flag at the top of this file is true. fn debug_get_display_name(&self, ctx: &Ctx, node_index: InferNodeIndex) -> String { let expr_type = &self.infer_nodes[node_index].expr_type; expr_type.display_name(&ctx.heap) } fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> { // Keep inferring until we can no longer make any progress while !self.node_queued.is_empty() { while !self.node_queued.is_empty() { let node_index = self.node_queued.pop_front().unwrap(); self.progress_inference_rule(ctx, node_index)?; } // Nothing is queued anymore. However we might have integer literals // whose type cannot be inferred. For convenience's sake we'll // infer these to be s32. for (infer_node_index, infer_node) in self.infer_nodes.iter_mut().enumerate() { let expr_type = &mut infer_node.expr_type; if !expr_type.is_done && expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike { // Force integer type to s32 expr_type.parts[0] = InferenceTypePart::SInt32; expr_type.is_done = true; // Requeue expression (and its parent, if it exists) self.node_queued.push_back(infer_node_index); if let Some(node_parent_index) = infer_node.parent_index { self.node_queued.push_back(node_parent_index); } } } } // Helper for transferring polymorphic variables to concrete types and // checking if they're completely specified fn poly_data_type_to_concrete_type( ctx: &Ctx, expr_id: ExpressionId, inference_poly_args: &Vec, first_concrete_part: ConcreteTypePart, ) -> Result { // Prepare storage vector let mut num_inference_parts = 0; for inference_type in inference_poly_args { num_inference_parts += inference_type.parts.len(); } let mut concrete_type = ConcreteType{ parts: Vec::with_capacity(1 + num_inference_parts), }; concrete_type.parts.push(first_concrete_part); // Go through all polymorphic arguments and add them to the concrete // types. for (poly_idx, poly_type) in inference_poly_args.iter().enumerate() { if !poly_type.is_done { let expr = &ctx.heap[expr_id]; let definition = match expr { Expression::Call(expr) => expr.procedure.upcast(), Expression::Literal(expr) => match &expr.value { Literal::Enum(lit) => lit.definition, Literal::Union(lit) => lit.definition, Literal::Struct(lit) => lit.definition, _ => unreachable!() }, _ => unreachable!(), }; let poly_vars = ctx.heap[definition].poly_vars(); return Err(ParseError::new_error_at_span( &ctx.module().source, expr.operation_span(), format!( "could not fully infer the type of polymorphic variable '{}' of this expression (got '{}')", poly_vars[poly_idx].value.as_str(), poly_type.display_name(&ctx.heap) ) )); } poly_type.write_concrete_type(&mut concrete_type); } Ok(concrete_type) } // Every expression checked, and new monomorphs are queued. Transfer the // expression information to the AST. If this is the first time we're // visiting this procedure then we assign expression indices as well. let procedure = &ctx.heap[self.procedure_id]; let num_infer_nodes = self.infer_nodes().len(); let mut monomorph = ProcedureDefinitionMonomorph{ argument_types: Vec::with_capacity(procedure.parameters.len()), expr_info: Vec::with_capacity(num_infer_nodes), }; // For all of the expressions look up the TypeId (or create a new one). // For function calls and component instantiations figure out if they // need to be typechecked for infer_node in self.infer_nodes.iter_mut() { // Determine type ID let expr = &ctx.heap[infer_node.expr_id]; let poly_data = &self.poly_data[infer_node.poly_data_index as usize]; // TODO: Maybe optimize? Split insertion up into lookup, then clone // if needed? let mut concrete_type = ConcreteType::default(); infer_node.expr_type.write_concrete_type(&mut concrete_type); let info_type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete_type)?; // Determine procedure type ID, i.e. a called/instantiated // procedure's signature. let info_variant = if let Expression::Call(expr) = expr { // Construct full function type. If not yet typechecked then // queue it for typechecking. debug_assert!(expr.method.is_user_defined() || expr.method.is_public_builtin()); let num_poly_vars = poly_data.poly_vars.len() as u32; let first_part = match expr.method { Method::UserFunction => ConcreteTypePart::Function(expr.procedure, num_poly_vars), Method::UserComponent => ConcreteTypePart::Component(expr.procedure, num_poly_vars), _ => ConcreteTypePart::Function(expr.procedure, num_poly_vars), }; let definition_id = expr.procedure.upcast(); let signature_type = poly_data_type_to_concrete_type( ctx, infer_node.expr_id, &poly_data.poly_vars, first_part )?; let (type_id, monomorph_index) = if let Some(type_id) = ctx.types.get_procedure_monomorph_type_id(&definition_id, &signature_type.parts) { // Procedure is already typechecked let monomorph_index = ctx.types.get_monomorph(type_id).variant.as_procedure().monomorph_index; (type_id, monomorph_index) } else { // Procedure is not yet typechecked, reserve a TypeID and a monomorph index let procedure_to_check = &mut ctx.heap[expr.procedure]; let monomorph_index = procedure_to_check.monomorphs.len() as u32; procedure_to_check.monomorphs.push(ProcedureDefinitionMonomorph::new_invalid()); let type_id = ctx.types.reserve_procedure_monomorph_type_id(&definition_id, signature_type, monomorph_index); queue.push(ResolveQueueElement{ root_id: ctx.heap[definition_id].defined_in(), definition_id, reserved_type_id: type_id, reserved_monomorph_index: monomorph_index, }); (type_id, monomorph_index) }; ExpressionInfoVariant::Procedure(type_id, monomorph_index) } else if let Expression::Select(expr) = expr { ExpressionInfoVariant::Select(infer_node.field_index) } else { ExpressionInfoVariant::Generic }; infer_node.info_type_id = info_type_id; infer_node.info_variant = info_variant; } // Write the types of the arguments for parameter_id in procedure.parameters.iter().copied() { let mut concrete = ConcreteType::default(); let var_data = self.var_data.iter().find(|v| v.var_id == parameter_id).unwrap(); var_data.var_type.write_concrete_type(&mut concrete); let type_id = ctx.types.add_monomorphed_type(ctx.modules, ctx.heap, ctx.arch, concrete)?; monomorph.argument_types.push(type_id) } // Determine if we have already assigned type indices to the expressions // before (the indices that, for a monomorph, can retrieve the type of // the expression). let has_type_indices = self.reserved_monomorph_index == 0; if has_type_indices { // already have indices, so resize and then index into it monomorph.expr_info.resize_with(num_infer_nodes, ExpressionInfo::new_invalid()); for infer_node in self.infer_nodes.iter() { let type_index = ctx.heap[infer_node.expr_id].type_index(); monomorph.expr_info[type_index as usize] = infer_node.as_expression_info(); } } else { // no indices yet, need to be assigned in AST for infer_node in self.infer_nodes.iter() { let type_index = monomorph.expr_info.len(); monomorph.expr_info.push(infer_node.as_expression_info()); *ctx.heap[infer_node.expr_id].type_index_mut() = type_index as i32; } } // Push the information into the AST let procedure = &mut ctx.heap[self.procedure_id]; procedure.monomorphs[self.reserved_monomorph_index as usize] = monomorph; Ok(()) } fn progress_inference_rule(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { use InferenceRule as IR; let node = &self.infer_nodes[node_index]; match &node.inference_rule { IR::Noop => unreachable!(), IR::MonoTemplate(_) => self.progress_inference_rule_mono_template(ctx, node_index), IR::BiEqual(_) => self.progress_inference_rule_bi_equal(ctx, node_index), IR::TriEqualArgs(_) => self.progress_inference_rule_tri_equal_args(ctx, node_index), IR::TriEqualAll(_) => self.progress_inference_rule_tri_equal_all(ctx, node_index), IR::Concatenate(_) => self.progress_inference_rule_concatenate(ctx, node_index), IR::IndexingExpr(_) => self.progress_inference_rule_indexing_expr(ctx, node_index), IR::SlicingExpr(_) => self.progress_inference_rule_slicing_expr(ctx, node_index), IR::SelectStructField(_) => self.progress_inference_rule_select_struct_field(ctx, node_index), IR::SelectTupleMember(_) => self.progress_inference_rule_select_tuple_member(ctx, node_index), IR::LiteralStruct(_) => self.progress_inference_rule_literal_struct(ctx, node_index), IR::LiteralEnum => self.progress_inference_rule_literal_enum(ctx, node_index), IR::LiteralUnion(_) => self.progress_inference_rule_literal_union(ctx, node_index), IR::LiteralArray(_) => self.progress_inference_rule_literal_array(ctx, node_index), IR::LiteralTuple(_) => self.progress_inference_rule_literal_tuple(ctx, node_index), IR::CastExpr(_) => self.progress_inference_rule_cast_expr(ctx, node_index), IR::CallExpr(_) => self.progress_inference_rule_call_expr(ctx, node_index), IR::VariableExpr(_) => self.progress_inference_rule_variable_expr(ctx, node_index), } } fn progress_inference_rule_mono_template(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = *node.inference_rule.as_mono_template(); let progress = self.progress_template(ctx, node_index, rule.application, rule.template)?; if progress { self.queue_node_parent(node_index); } return Ok(()); } fn progress_inference_rule_bi_equal(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_bi_equal(); let template = rule.template; let arg_index = rule.argument_index; let base_progress = self.progress_template(ctx, node_index, template.application, template.template)?; let (node_progress, arg_progress) = self.apply_equal2_constraint(ctx, node_index, node_index, 0, arg_index, 0)?; if base_progress || node_progress { self.queue_node_parent(node_index); } if arg_progress { self.queue_node(arg_index); } return Ok(()) } fn progress_inference_rule_tri_equal_args(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_tri_equal_args(); let result_template = rule.result_template; let argument_template = rule.argument_template; let arg1_index = rule.argument1_index; let arg2_index = rule.argument2_index; let self_template_progress = self.progress_template(ctx, node_index, result_template.application, result_template.template)?; let arg1_template_progress = self.progress_template(ctx, arg1_index, argument_template.application, argument_template.template)?; let (arg1_progress, arg2_progress) = self.apply_equal2_constraint(ctx, node_index, arg1_index, 0, arg2_index, 0)?; if self_template_progress { self.queue_node_parent(node_index); } if arg1_template_progress || arg1_progress { self.queue_node(arg1_index); } if arg2_progress { self.queue_node(arg2_index); } return Ok(()); } fn progress_inference_rule_tri_equal_all(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_tri_equal_all(); let template = rule.template; let arg1_index = rule.argument1_index; let arg2_index = rule.argument2_index; let template_progress = self.progress_template(ctx, node_index, template.application, template.template)?; let (node_progress, arg1_progress, arg2_progress) = self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)?; if template_progress || node_progress { self.queue_node_parent(node_index); } if arg1_progress { self.queue_node(arg1_index); } if arg2_progress { self.queue_node(arg2_index); } return Ok(()); } fn progress_inference_rule_concatenate(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_concatenate(); let arg1_index = rule.argument1_index; let arg2_index = rule.argument2_index; // Two cases: one of the arguments is a string (then all must be), or // one of the arguments is an array (and all must be arrays). let (expr_is_str, expr_is_not_str) = self.type_is_certainly_or_certainly_not_string(node_index); let (arg1_is_str, arg1_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg1_index); let (arg2_is_str, arg2_is_not_str) = self.type_is_certainly_or_certainly_not_string(arg2_index); let someone_is_str = expr_is_str || arg1_is_str || arg2_is_str; let someone_is_not_str = expr_is_not_str || arg1_is_not_str || arg2_is_not_str; println!("DEBUG: Running concat, is_str = {}, is_not_str = {}", someone_is_str, someone_is_not_str); // Note: this statement is an expression returning the progression bools let (node_progress, arg1_progress, arg2_progress) = if someone_is_str { // One of the arguments is a string, then all must be strings self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 0)? } else { let progress_expr = if someone_is_not_str { // Output must be a normal array self.apply_template_constraint(ctx, node_index, &ARRAY_TEMPLATE)? } else { // Output may still be anything self.apply_template_constraint(ctx, node_index, &ARRAYLIKE_TEMPLATE)? }; let progress_arg1 = self.apply_template_constraint(ctx, arg1_index, &ARRAYLIKE_TEMPLATE)?; let progress_arg2 = self.apply_template_constraint(ctx, arg2_index, &ARRAYLIKE_TEMPLATE)?; // If they're all arraylike, then we want the subtype to match let (subtype_expr, subtype_arg1, subtype_arg2) = self.apply_equal3_constraint(ctx, node_index, arg1_index, arg2_index, 1)?; (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2) }; if node_progress { self.queue_node_parent(node_index); } if arg1_progress { self.queue_node(arg1_index); } if arg2_progress { self.queue_node(arg2_index); } return Ok(()) } fn progress_inference_rule_indexing_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_indexing_expr(); let subject_index = rule.subject_index; let index_index = rule.index_index; // which one? // Subject is arraylike, index in integerlike let subject_template_progress = self.apply_template_constraint(ctx, subject_index, &ARRAYLIKE_TEMPLATE)?; let index_template_progress = self.apply_template_constraint(ctx, index_index, &INTEGERLIKE_TEMPLATE)?; // If subject is type `Array`, then expr type is `T` let (node_progress, subject_progress) = self.apply_equal2_constraint(ctx, node_index, node_index, 0, subject_index, 1)?; if node_progress { self.queue_node_parent(node_index); } if subject_template_progress || subject_progress { self.queue_node(subject_index); } if index_template_progress { self.queue_node(index_index); } return Ok(()); } fn progress_inference_rule_slicing_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_slicing_expr(); let subject_index = rule.subject_index; let from_index_index = rule.from_index; let to_index_index = rule.to_index; debug_log!("Rule slicing [node: {}, expr: {}]", node_index, node.expr_id.index); // Subject is arraylike, indices are integerlike let subject_template_progress = self.apply_template_constraint(ctx, subject_index, &ARRAYLIKE_TEMPLATE)?; let from_template_progress = self.apply_template_constraint(ctx, from_index_index, &INTEGERLIKE_TEMPLATE)?; let to_template_progress = self.apply_template_constraint(ctx, to_index_index, &INTEGERLIKE_TEMPLATE)?; let (from_index_progress, to_index_progress) = self.apply_equal2_constraint(ctx, node_index, from_index_index, 0, to_index_index, 0)?; // Same as array indexing: result depends on whether subject is string // or array let (is_string, is_not_string) = self.type_is_certainly_or_certainly_not_string(node_index); println!("DEBUG: Running slicing, is_str = {}, is_not_str = {}", is_string, is_not_string); let (node_progress, subject_progress) = if is_string { // Certainly a string ( self.apply_forced_constraint(ctx, node_index, &STRING_TEMPLATE)?, false ) } else if is_not_string { // Certainly not a string, apply template constraint. Then make sure // that if we have an `Array`, that the slice produces `Slice` let node_template_progress = self.apply_template_constraint(ctx, node_index, &SLICE_TEMPLATE)?; let (node_progress, subject_progress) = self.apply_equal2_constraint(ctx, node_index, node_index, 1, subject_index, 1)?; ( node_template_progress || node_progress, subject_progress ) } else { // Not sure yet let node_template_progress = self.apply_template_constraint(ctx, node_index, &ARRAYLIKE_TEMPLATE)?; let (node_progress, subject_progress) = self.apply_equal2_constraint(ctx, node_index, node_index, 1, subject_index, 1)?; ( node_template_progress || node_progress, subject_progress ) }; if node_progress { self.queue_node_parent(node_index); } if subject_template_progress || subject_progress { self.queue_node(subject_index); } if from_template_progress || from_index_progress { self.queue_node(from_index_index); } if to_template_progress || to_index_progress { self.queue_node(to_index_index); } return Ok(()); } fn progress_inference_rule_select_struct_field(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_select_struct_field(); let subject_index = rule.subject_index; let selected_field = rule.selected_field.clone(); fn get_definition_id_from_inference_type(inference_type: &InferenceType) -> Result, ()> { for part in inference_type.parts.iter() { if part.is_marker() { continue; } if !part.is_concrete() { break; } if let InferenceTypePart::Instance(definition_id, _) = part { return Ok(Some(*definition_id)); } else { return Err(()) } } // Nothing is known yet return Ok(None); } if node.field_index < 0 { // Don't know the subject definition, hence the field yet. Try to // determine it. let subject_node = &self.infer_nodes[subject_index]; match get_definition_id_from_inference_type(&subject_node.expr_type) { Ok(Some(definition_id)) => { // Determined definition of subject for the first time. let base_definition = ctx.types.get_base_definition(&definition_id).unwrap(); let struct_definition = if let DefinedTypeVariant::Struct(struct_definition) = &base_definition.definition { struct_definition } else { return Err(ParseError::new_error_at_span( &ctx.module().source, selected_field.span, format!( "Can only apply field access to structs, got a subject of type '{}'", subject_node.expr_type.display_name(&ctx.heap) ) )); }; // Seek the field that is referenced by the select // expression let mut field_found = false; for (field_index, field) in struct_definition.fields.iter().enumerate() { if field.identifier.value == selected_field.value { // Found the field of interest field_found = true; let node = &mut self.infer_nodes[node_index]; node.field_index = field_index as i32; break; } } if !field_found { let struct_definition = ctx.heap[definition_id].as_struct(); return Err(ParseError::new_error_at_span( &ctx.module().source, selected_field.span, format!( "this field does not exist on the struct '{}'", struct_definition.identifier.value.as_str() ) )); } // Insert the initial data needed to infer polymorphic // fields let extra_index = self.insert_initial_select_polymorph_data(ctx, node_index, definition_id); let node = &mut self.infer_nodes[node_index]; node.poly_data_index = extra_index; }, Ok(None) => { // We don't know what to do yet, because we don't know the // subject type yet. return Ok(()) }, Err(()) => { return Err(ParseError::new_error_at_span( &ctx.module().source, rule.selected_field.span, format!( "Can only apply field access to structs, got a subject of type '{}'", subject_node.expr_type.display_name(&ctx.heap) ) )); }, } } // If here then the field index is known, hence we can start inferring // the type of the selected field let field_expr_id = self.infer_nodes[node_index].expr_id; let subject_expr_id = self.infer_nodes[subject_index].expr_id; let mut poly_progress_section = self.poly_progress_buffer.start_section(); let (_, progress_subject_1) = self.apply_polydata_equal2_constraint( ctx, node_index, subject_expr_id, "selected struct's", PolyDataTypeIndex::Associated(0), 0, subject_index, 0, &mut poly_progress_section )?; let (_, progress_field_1) = self.apply_polydata_equal2_constraint( ctx, node_index, field_expr_id, "selected field's", PolyDataTypeIndex::Returned, 0, node_index, 0, &mut poly_progress_section )?; // Maybe make progress on types due to inferred polymorphic variables let progress_subject_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Associated(0), subject_index, &poly_progress_section ); let progress_field_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Returned, node_index, &poly_progress_section ); if progress_subject_1 || progress_subject_2 { self.queue_node(subject_index); } if progress_field_1 || progress_field_2 { self.queue_node_parent(node_index); } poly_progress_section.forget(); self.finish_polydata_constraint(node_index); return Ok(()) } fn progress_inference_rule_select_tuple_member(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_select_tuple_member(); let subject_index = rule.subject_index; let tuple_member_index = rule.selected_index; if node.field_index < 0 { let subject_type = &self.infer_nodes[subject_index].expr_type; let tuple_size = get_tuple_size_from_inference_type(subject_type); let tuple_size = match tuple_size { Ok(Some(tuple_size)) => { tuple_size }, Ok(None) => { // We can't infer anything yet return Ok(()) }, Err(()) => { let select_expr_span = ctx.heap[node.expr_id].full_span(); return Err(ParseError::new_error_at_span( &ctx.module().source, select_expr_span, format!( "tuple element select cannot be applied to a subject of type '{}'", subject_type.display_name(&ctx.heap) ) )); } }; // If here then we at least have the tuple size. Now check if the // index doesn't exceed that size. if tuple_member_index >= tuple_size as u64 { let select_expr_span = ctx.heap[node.expr_id].full_span(); return Err(ParseError::new_error_at_span( &ctx.module().source, select_expr_span, format!( "element index {} is out of bounds, tuple has {} elements", tuple_member_index, tuple_size ) )); } // Within bounds, set index on the type inference node let node = &mut self.infer_nodes[node_index]; node.field_index = tuple_member_index as i32; } // If here then we know we can use `tuple_member_index`. We need to keep // computing the offset to the subtype, as its value changes during // inference let subject_type = &self.infer_nodes[subject_index].expr_type; let mut selected_member_start_index = 1; // start just after the InferenceTypeElement::Tuple for _ in 0..tuple_member_index { selected_member_start_index = InferenceType::find_subtree_end_idx(&subject_type.parts, selected_member_start_index); } let (progress_member, progress_subject) = self.apply_equal2_constraint( ctx, node_index, node_index, 0, subject_index, selected_member_start_index )?; if progress_member { self.queue_node_parent(node_index); } if progress_subject { self.queue_node(subject_index); } return Ok(()); } fn progress_inference_rule_literal_struct(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let node_expr_id = node.expr_id; let rule = node.inference_rule.as_literal_struct(); // For each of the fields in the literal struct, apply the type equality // constraint. If the literal is polymorphic, then we try to progress // their types during this process let element_indices_section = self.index_buffer.start_section_initialized(&rule.element_indices); let mut poly_progress_section = self.poly_progress_buffer.start_section(); for (field_index, field_node_index) in element_indices_section.iter_copied().enumerate() { let field_expr_id = self.infer_nodes[field_node_index].expr_id; let (_, progress_field) = self.apply_polydata_equal2_constraint( ctx, node_index, field_expr_id, "struct field's", PolyDataTypeIndex::Associated(field_index), 0, field_node_index, 0, &mut poly_progress_section )?; if progress_field { self.queue_node(field_node_index); } } // Now we do the same thing for the struct literal expression (the type // of the struct itself). let (_, progress_literal_1) = self.apply_polydata_equal2_constraint( ctx, node_index, node_expr_id, "struct literal's", PolyDataTypeIndex::Returned, 0, node_index, 0, &mut poly_progress_section )?; // And the other way around: if any of our polymorphic variables are // more specific then they were before, then we forward that information // back to our struct/fields. for (field_index, field_node_index) in element_indices_section.iter_copied().enumerate() { let progress_field = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Associated(field_index), field_node_index, &poly_progress_section ); if progress_field { self.queue_node(field_node_index); } } let progress_literal_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Returned, node_index, &poly_progress_section ); if progress_literal_1 || progress_literal_2 { self.queue_node_parent(node_index); } poly_progress_section.forget(); element_indices_section.forget(); self.finish_polydata_constraint(node_index); return Ok(()) } fn progress_inference_rule_literal_enum(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let node_expr_id = node.expr_id; let mut poly_progress_section = self.poly_progress_buffer.start_section(); // An enum literal type is simply, well, the enum's type. However, it // might still have polymorphic variables, hence the use of `PolyData`. let (_, progress_literal_1) = self.apply_polydata_equal2_constraint( ctx, node_index, node_expr_id, "enum literal's", PolyDataTypeIndex::Returned, 0, node_index, 0, &mut poly_progress_section )?; let progress_literal_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Returned, node_index, &poly_progress_section ); if progress_literal_1 || progress_literal_2 { self.queue_node_parent(node_index); } poly_progress_section.forget(); self.finish_polydata_constraint(node_index); return Ok(()); } fn progress_inference_rule_literal_union(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let node_expr_id = node.expr_id; let rule = node.inference_rule.as_literal_union(); // Infer type of any embedded values in the union variant. At the same // time progress the polymorphic variables associated with the union. let element_indices_section = self.index_buffer.start_section_initialized(&rule.element_indices); let mut poly_progress_section = self.poly_progress_buffer.start_section(); for (embedded_index, embedded_node_index) in element_indices_section.iter_copied().enumerate() { let embedded_node_expr_id = self.infer_nodes[embedded_node_index].expr_id; let (_, progress_embedded) = self.apply_polydata_equal2_constraint( ctx, node_index, embedded_node_expr_id, "embedded value's", PolyDataTypeIndex::Associated(embedded_index), 0, embedded_node_index, 0, &mut poly_progress_section )?; if progress_embedded { self.queue_node(embedded_node_index); } } let (_, progress_literal_1) = self.apply_polydata_equal2_constraint( ctx, node_index, node_expr_id, "union's", PolyDataTypeIndex::Returned, 0, node_index, 0, &mut poly_progress_section )?; // Propagate progress in the polymorphic variables to the expressions // that constitute the union literal. for (embedded_index, embedded_node_index) in element_indices_section.iter_copied().enumerate() { let progress_embedded = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Associated(embedded_index), embedded_node_index, &poly_progress_section ); if progress_embedded { self.queue_node(embedded_node_index); } } let progress_literal_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Returned, node_index, &poly_progress_section ); if progress_literal_1 || progress_literal_2 { self.queue_node_parent(node_index); } poly_progress_section.forget(); self.finish_polydata_constraint(node_index); return Ok(()); } fn progress_inference_rule_literal_array(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_literal_array(); // Apply equality rule to all of the elements that form the array let argument_node_indices = self.index_buffer.start_section_initialized(&rule.element_indices); let mut argument_progress_section = self.bool_buffer.start_section(); self.apply_equal_n_constraint(ctx, node_index, &argument_node_indices, &mut argument_progress_section)?; debug_assert_eq!(argument_node_indices.len(), argument_progress_section.len()); for argument_index in 0..argument_node_indices.len() { let argument_node_index = argument_node_indices[argument_index]; let progress = argument_progress_section[argument_index]; if progress { self.queue_node(argument_node_index); } } // If elements are of type `T`, then the array is of type `Array`, so: let mut progress_literal = self.apply_template_constraint(ctx, node_index, &ARRAY_TEMPLATE)?; if argument_node_indices.len() != 0 { let argument_node_index = argument_node_indices[0]; let (progress_literal_inner, progress_argument) = self.apply_equal2_constraint( ctx, node_index, node_index, 1, argument_node_index, 0 )?; progress_literal = progress_literal || progress_literal_inner; // It is possible that the `Array` has a more progress `T` then // the arguments. So in the case we progress our argument type we // simply queue this rule again if progress_argument { self.queue_node(node_index); } } argument_node_indices.forget(); argument_progress_section.forget(); if progress_literal { self.queue_node_parent(node_index); } return Ok(()); } fn progress_inference_rule_literal_tuple(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_literal_tuple(); let element_indices = self.index_buffer.start_section_initialized(&rule.element_indices); // Check if we need to apply the initial tuple template type. Note that // this is a hacky check. let num_tuple_elements = rule.element_indices.len(); let mut template_type = Vec::with_capacity(num_tuple_elements + 1); // TODO: @performance template_type.push(InferenceTypePart::Tuple(num_tuple_elements as u32)); for _ in 0..num_tuple_elements { template_type.push(InferenceTypePart::Unknown); } let mut progress_literal = self.apply_template_constraint(ctx, node_index, &template_type)?; // Because of the (early returning error) check above, we're certain // that the tuple has the correct number of elements. Now match each // element expression type to the tuple subtype. let mut element_subtree_start_index = 1; // first element is InferenceTypePart::Tuple for element_node_index in element_indices.iter_copied() { let (progress_literal_element, progress_element) = self.apply_equal2_constraint( ctx, node_index, node_index, element_subtree_start_index, element_node_index, 0 )?; progress_literal = progress_literal || progress_literal_element; if progress_element { self.queue_node(element_node_index); } // Prepare for next element let node = &self.infer_nodes[node_index]; let subtree_end_index = InferenceType::find_subtree_end_idx(&node.expr_type.parts, element_subtree_start_index); element_subtree_start_index = subtree_end_index; } debug_assert_eq!(element_subtree_start_index, self.infer_nodes[node_index].expr_type.parts.len()); if progress_literal { self.queue_node_parent(node_index); } element_indices.forget(); return Ok(()); } fn progress_inference_rule_cast_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_cast_expr(); let subject_index = rule.subject_index; let subject = &self.infer_nodes[subject_index]; // Make sure that both types are completely done. Note: a cast // expression cannot really infer anything between the subject and the // output type, we can only make sure that, at the end, the cast is // correct. if !node.expr_type.is_done || !subject.expr_type.is_done { return Ok(()); } // Both types are known, currently the only valid casts are bool, // integer and character casts. fn is_bool_int_or_char(parts: &[InferenceTypePart]) -> bool { let mut index = 0; while index < parts.len() { let part = &parts[index]; if !part.is_marker() { break; } index += 1; } debug_assert!(index != parts.len()); let part = &parts[index]; if *part == InferenceTypePart::Bool || *part == InferenceTypePart::Character || part.is_concrete_integer() { debug_assert!(index + 1 == parts.len()); // type is done, first part does not have children -> must be at end return true; } else { return false; } } let is_valid = if is_bool_int_or_char(&node.expr_type.parts) && is_bool_int_or_char(&subject.expr_type.parts) { true } else if InferenceType::check_subtrees(&node.expr_type.parts, 0, &subject.expr_type.parts, 0) { // again: check_subtrees is sufficient since both types are done true } else { false }; if !is_valid { let cast_expr = &ctx.heap[node.expr_id]; let subject_expr = &ctx.heap[subject.expr_id]; return Err(ParseError::new_error_str_at_span( &ctx.module().source, cast_expr.full_span(), "invalid casting operation" ).with_info_at_span( &ctx.module().source, subject_expr.full_span(), format!( "cannot cast the argument type '{}' to the type '{}'", subject.expr_type.display_name(&ctx.heap), node.expr_type.display_name(&ctx.heap) ) )); } return Ok(()) } fn progress_inference_rule_call_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &self.infer_nodes[node_index]; let node_expr_id = node.expr_id; let rule = node.inference_rule.as_call_expr(); let mut poly_progress_section = self.poly_progress_buffer.start_section(); let argument_node_indices = self.index_buffer.start_section_initialized(&rule.argument_indices); // Perform inference on arguments to function, while trying to figure // out the polymorphic variables for (argument_index, argument_node_index) in argument_node_indices.iter_copied().enumerate() { let argument_expr_id = self.infer_nodes[argument_node_index].expr_id; let (_, progress_argument) = self.apply_polydata_equal2_constraint( ctx, node_index, argument_expr_id, "argument's", PolyDataTypeIndex::Associated(argument_index), 0, argument_node_index, 0, &mut poly_progress_section )?; if progress_argument { self.queue_node(argument_node_index); } } // Same for the return type. let (_, progress_call_1) = self.apply_polydata_equal2_constraint( ctx, node_index, node_expr_id, "return", PolyDataTypeIndex::Returned, 0, node_index, 0, &mut poly_progress_section )?; // We will now apply any progression in the polymorphic variable type // back to the arguments. for (argument_index, argument_node_index) in argument_node_indices.iter_copied().enumerate() { let progress_argument = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Associated(argument_index), argument_node_index, &poly_progress_section ); if progress_argument { self.queue_node(argument_node_index); } } // And back to the return type. let progress_call_2 = self.apply_polydata_polyvar_constraint( ctx, node_index, PolyDataTypeIndex::Returned, node_index, &poly_progress_section ); if progress_call_1 || progress_call_2 { self.queue_node_parent(node_index); } poly_progress_section.forget(); argument_node_indices.forget(); self.finish_polydata_constraint(node_index); return Ok(()) } fn progress_inference_rule_variable_expr(&mut self, ctx: &Ctx, node_index: InferNodeIndex) -> Result<(), ParseError> { let node = &mut self.infer_nodes[node_index]; let rule = node.inference_rule.as_variable_expr(); let var_data_index = rule.var_data_index; let var_data = &mut self.var_data[var_data_index]; // Apply inference to the shared variable type and the expression type let shared_type: *mut _ = &mut var_data.var_type; let expr_type: *mut _ = &mut node.expr_type; let inference_result = unsafe { // safety: vectors exist in different storage vectors, so cannot alias InferenceType::infer_subtrees_for_both_types(shared_type, 0, expr_type, 0) }; if inference_result == DualInferenceResult::Incompatible { return Err(self.construct_variable_type_error(ctx, node_index)); } let progress_var_data = inference_result.modified_lhs(); let progress_expr = inference_result.modified_rhs(); if progress_var_data { // We progressed the type of the shared variable, so propagate this // to all associated variable expressions (and relatived variables). for other_node_index in var_data.used_at.iter().copied() { if other_node_index != node_index { self.node_queued.push_back(other_node_index); } } if let Some(linked_var_data_index) = var_data.linked_var { // Only perform one-way inference, progressing the linked // variable. // note: because this "linking" is used only for channels, we // will start inference one level below the top-level in the // type tree (i.e. ensure `T` in `in` and `out` is equal). debug_assert!( var_data.var_type.parts[0] == InferenceTypePart::Input || var_data.var_type.parts[0] == InferenceTypePart::Output ); let this_var_type: *const _ = &var_data.var_type; let linked_var_data = &mut self.var_data[linked_var_data_index]; debug_assert!( linked_var_data.var_type.parts[0] == InferenceTypePart::Input || linked_var_data.var_type.parts[0] == InferenceTypePart::Output ); // safety: by construction var_data_index and linked_var_data_index cannot be the // same, hence we're not aliasing here. let inference_result = InferenceType::infer_subtree_for_single_type( &mut linked_var_data.var_type, 1, unsafe{ &(*this_var_type).parts }, 1, false ); match inference_result { SingleInferenceResult::Modified => { for used_at in linked_var_data.used_at.iter().copied() { self.node_queued.push_back(used_at); } }, SingleInferenceResult::Unmodified => {}, SingleInferenceResult::Incompatible => { let var_data_this = &self.var_data[var_data_index]; let var_decl_this = &ctx.heap[var_data_this.var_id]; let var_data_linked = &self.var_data[linked_var_data_index]; let var_decl_linked = &ctx.heap[var_data_linked.var_id]; return Err(ParseError::new_error_at_span( &ctx.module().source, var_decl_this.identifier.span, format!( "conflicting types for this channel, this port has type '{}'", var_data_this.var_type.display_name(&ctx.heap) ) ).with_info_at_span( &ctx.module().source, var_decl_linked.identifier.span, format!( "while this port has type '{}'", var_data_linked.var_type.display_name(&ctx.heap) ) )); } } } } if progress_expr { self.queue_node_parent(node_index); } return Ok(()); } fn progress_template(&mut self, ctx: &Ctx, node_index: InferNodeIndex, application: InferenceRuleTemplateApplication, template: &[InferenceTypePart]) -> Result { use InferenceRuleTemplateApplication as TA; match application { TA::None => Ok(false), TA::Template => self.apply_template_constraint(ctx, node_index, template), TA::Forced => self.apply_forced_constraint(ctx, node_index, template), } } fn queue_node_parent(&mut self, node_index: InferNodeIndex) { let node = &self.infer_nodes[node_index]; if let Some(parent_node_index) = node.parent_index { self.node_queued.push_back(parent_node_index); } } #[inline] fn queue_node(&mut self, node_index: InferNodeIndex) { self.node_queued.push_back(node_index); } /// Returns whether the type is certainly a string (true, false), certainly /// not a string (false, true), or still unknown (false, false). fn type_is_certainly_or_certainly_not_string(&self, node_index: InferNodeIndex) -> (bool, bool) { let expr_type = &self.infer_nodes[node_index].expr_type; println!("DEBUG: Running test on {:?}", expr_type.parts); let mut part_index = 0; while part_index < expr_type.parts.len() { let part = &expr_type.parts[part_index]; if part.is_marker() { part_index += 1; continue; } if !part.is_concrete() { break; } if *part == InferenceTypePart::String { // First part is a string return (true, false); } else { return (false, true); } } // If here then first non-marker type is not concrete if part_index == expr_type.parts.len() { // nothing known at all return (false, false); } // Special case: array-like where its argument is not a character if part_index + 1 < expr_type.parts.len() { if expr_type.parts[part_index] == InferenceTypePart::ArrayLike && expr_type.parts[part_index + 1] != InferenceTypePart::Character { return (false, true); } } (false, false) } /// Applies a template type constraint: the type associated with the /// supplied expression will be molded into the provided `template`. But /// will be considered valid if the template could've been molded into the /// expression type as well. Hence the template may be fully specified (e.g. /// a bool) or contain "inference" variables (e.g. an array of T) fn apply_template_constraint( &mut self, ctx: &Ctx, node_index: InferNodeIndex, template: &[InferenceTypePart] ) -> Result { let expr_type = &mut self.infer_nodes[node_index].expr_type; match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0, false) { SingleInferenceResult::Modified => Ok(true), SingleInferenceResult::Unmodified => Ok(false), SingleInferenceResult::Incompatible => Err( self.construct_template_type_error(ctx, node_index, template) ) } } /// Applies a forced constraint: the supplied expression's type MUST be /// inferred from the template, the other way around is considered invalid. fn apply_forced_constraint( &mut self, ctx: &Ctx, node_index: InferNodeIndex, template: &[InferenceTypePart] ) -> Result { let expr_type = &mut self.infer_nodes[node_index].expr_type; match InferenceType::infer_subtree_for_single_type(expr_type, 0, template, 0, true) { SingleInferenceResult::Modified => Ok(true), SingleInferenceResult::Unmodified => Ok(false), SingleInferenceResult::Incompatible => Err( self.construct_template_type_error(ctx, node_index, template) ) } } /// Applies a type constraint that expects the two provided types to be /// equal. We attempt to make progress in inferring the types. If the call /// is successful then the composition of all types are made equal. /// The "parent" `expr_id` is provided to construct errors. fn apply_equal2_constraint( &mut self, ctx: &Ctx, node_index: InferNodeIndex, arg1_index: InferNodeIndex, arg1_start_idx: usize, arg2_index: InferNodeIndex, arg2_start_idx: usize ) -> Result<(bool, bool), ParseError> { let arg1_type: *mut _ = &mut self.infer_nodes[arg1_index].expr_type; let arg2_type: *mut _ = &mut self.infer_nodes[arg2_index].expr_type; let infer_res = unsafe{ InferenceType::infer_subtrees_for_both_types( arg1_type, arg1_start_idx, arg2_type, arg2_start_idx ) }; if infer_res == DualInferenceResult::Incompatible { return Err(self.construct_arg_type_error(ctx, node_index, arg1_index, arg2_index)); } Ok((infer_res.modified_lhs(), infer_res.modified_rhs())) } /// Applies an equal2 constraint between a member of the `PolyData` struct, /// and another inferred type. If any progress is made in the `PolyData` /// struct then the affected polymorphic variables are updated as well. /// /// Because a lot of types/expressions are involved in polymorphic typFe /// inference, some explanation: "outer_node" refers to the main expression /// that is the root cause of type inference (e.g. a struct literal /// expression, or a tuple member select expression). Associated with that /// outer node is `PolyData`, so that is what the "poly_data" variables /// are referring to. We are applying equality between a "poly_data" type /// and an associated expression (not necessarily the "outer_node", e.g. /// the expression that constructs the value of a struct field). Hence the /// "associated" variables. /// /// Finally, when an error occurs we'll first show the outer node's /// location. As info, the `error_location_expr_id` span is shown, /// indicating that the "`error_type_name` type has been resolved to /// `outer_node_type`, but this expression has been resolved to /// `associated_node_type`". fn apply_polydata_equal2_constraint( &mut self, ctx: &Ctx, outer_node_index: InferNodeIndex, error_location_expr_id: ExpressionId, error_type_name: &str, poly_data_type_index: PolyDataTypeIndex, poly_data_start_index: usize, associated_node_index: InferNodeIndex, associated_node_start_index: usize, poly_progress_section: &mut ScopedSection, ) -> Result<(bool, bool), ParseError> { let poly_data_index = self.infer_nodes[outer_node_index].poly_data_index; let poly_data = &mut self.poly_data[poly_data_index as usize]; let poly_data_type = poly_data.expr_types.get_type_mut(poly_data_type_index); let associated_type: *mut _ = &mut self.infer_nodes[associated_node_index].expr_type; let inference_result = unsafe{ // Safety: pointers originate from different vectors, so cannot // alias. let poly_data_type: *mut _ = poly_data_type; InferenceType::infer_subtrees_for_both_types( poly_data_type, poly_data_start_index, associated_type, associated_node_start_index ) }; let modified_poly_data = inference_result.modified_lhs(); let modified_associated = inference_result.modified_rhs(); if inference_result == DualInferenceResult::Incompatible { let outer_node_expr_id = self.infer_nodes[outer_node_index].expr_id; let outer_node_span = ctx.heap[outer_node_expr_id].full_span(); let detailed_span = ctx.heap[error_location_expr_id].full_span(); let outer_node_type = poly_data_type.display_name(&ctx.heap); let associated_type = self.infer_nodes[associated_node_index].expr_type.display_name(&ctx.heap); let source = &ctx.module().source; return Err(ParseError::new_error_str_at_span( source, outer_node_span, "failed to resolve the types of this expression" ).with_info_str_at_span( source, detailed_span, &format!( "because the {} type has been resolved to '{}', but this expression has been resolved to '{}'", error_type_name, outer_node_type, associated_type ) )); } if modified_poly_data { debug_assert!(poly_data_type.has_marker); // Go through markers for polymorphic variables and use the // (hopefully) more specific types to update their representation // in the PolyData struct for (poly_var_index, poly_var_section) in poly_data_type.marker_iter() { let poly_var_type = &mut poly_data.poly_vars[poly_var_index as usize]; match InferenceType::infer_subtree_for_single_type(poly_var_type, 0, poly_var_section, 0, false) { SingleInferenceResult::Modified => { poly_progress_section.push_unique(poly_var_index); }, SingleInferenceResult::Unmodified => { // nothing to do }, SingleInferenceResult::Incompatible => { return Err(Self::construct_poly_arg_error( ctx, &self.poly_data[poly_data_index as usize], self.infer_nodes[outer_node_index].expr_id )); } } } } return Ok((modified_poly_data, modified_associated)); } /// After calling `apply_polydata_equal2_constraint` on several expressions /// that are associated with some kind of polymorphic expression, several of /// the polymorphic variables might have been inferred to more specific /// types than before. /// /// At this point one should call this function to apply the progress in /// these polymorphic variables back onto the types that are functions of /// these polymorphic variables. /// /// An example: a struct literal with a polymorphic variable `T` may have /// two fields `foo` and `bar` each with different types that are a function /// of the polymorhic variable `T`. If the expressions constructing the /// value for the field `foo` causes the type `T` to progress, then we can /// also progress the type of the expression that constructs `bar`. /// /// And so we have `outer_node_index` + `poly_data_type_index` pointing to /// the appropriate type in the `PolyData` struct. Which will be updated /// first using the polymorphic variables. If we happen to have updated that /// type, then we should also progress the associated expression, hence the /// `associated_node_index`. fn apply_polydata_polyvar_constraint( &mut self, _ctx: &Ctx, outer_node_index: InferNodeIndex, poly_data_type_index: PolyDataTypeIndex, associated_node_index: InferNodeIndex, poly_progress_section: &ScopedSection ) -> bool { let poly_data_index = self.infer_nodes[outer_node_index].poly_data_index; let poly_data = &mut self.poly_data[poly_data_index as usize]; // Early exit, most common case (literals or functions calls which are // actually not polymorphic) if !poly_data.first_rule_application && poly_progress_section.len() == 0 { return false; } // safety: we're borrowing from two distinct fields, so should be fine let poly_data_type = poly_data.expr_types.get_type_mut(poly_data_type_index); let mut last_start_index = 0; let mut modified_poly_type = false; while let Some((poly_var_index, poly_var_start_index)) = poly_data_type.find_marker(last_start_index) { let poly_var_end_index = InferenceType::find_subtree_end_idx(&poly_data_type.parts, poly_var_start_index); if poly_data.first_rule_application || poly_progress_section.contains(&poly_var_index) { // We have updated this polymorphic variable, so try updating it // in the PolyData type let modified_in_poly_data = match InferenceType::infer_subtree_for_single_type( poly_data_type, poly_var_start_index, &poly_data.poly_vars[poly_var_index as usize].parts, 0, false ) { SingleInferenceResult::Modified => true, SingleInferenceResult::Unmodified => false, SingleInferenceResult::Incompatible => { // practically impossible: before calling this function we gather all the // data on the polymorphic variables from the associated expressions. So if // the polymorphic variables in those expressions were not mutually // compatible, we must have encountered that error already. unreachable!() }, }; modified_poly_type = modified_poly_type || modified_in_poly_data; } last_start_index = poly_var_end_index; } if modified_poly_type { let associated_type = &mut self.infer_nodes[associated_node_index].expr_type; match InferenceType::infer_subtree_for_single_type( associated_type, 0, &poly_data_type.parts, 0, true ) { SingleInferenceResult::Modified => return true, SingleInferenceResult::Unmodified => return false, SingleInferenceResult::Incompatible => unreachable!(), // same as above } } else { // Did not update associated type return false; } } /// Should be called after completing one full round of applying polydata /// constraints. fn finish_polydata_constraint(&mut self, outer_node_index: InferNodeIndex) { let poly_data_index = self.infer_nodes[outer_node_index].poly_data_index; let poly_data = &mut self.poly_data[poly_data_index as usize]; poly_data.first_rule_application = false; } /// Applies a type constraint that expects all three provided types to be /// equal. In case we can make progress in inferring the types then we /// attempt to do so. If the call is successful then the composition of all /// types is made equal. fn apply_equal3_constraint( &mut self, ctx: &Ctx, node_index: InferNodeIndex, arg1_index: InferNodeIndex, arg2_index: InferNodeIndex, start_idx: usize ) -> Result<(bool, bool, bool), ParseError> { // Safety: all indices are unique // containers may not be modified let expr_type: *mut _ = &mut self.infer_nodes[node_index].expr_type; let arg1_type: *mut _ = &mut self.infer_nodes[arg1_index].expr_type; let arg2_type: *mut _ = &mut self.infer_nodes[arg2_index].expr_type; let expr_res = unsafe{ InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx) }; if expr_res == DualInferenceResult::Incompatible { return Err(self.construct_expr_type_error(ctx, node_index, arg1_index)); } let args_res = unsafe{ InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) }; if args_res == DualInferenceResult::Incompatible { return Err(self.construct_arg_type_error(ctx, node_index, arg1_index, arg2_index)); } // If all types are compatible, but the second call caused the arg1_type // to be expanded, then we must also assign this to expr_type. let mut progress_expr = expr_res.modified_lhs(); let mut progress_arg1 = expr_res.modified_rhs(); let progress_arg2 = args_res.modified_rhs(); if args_res.modified_lhs() { unsafe { let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx); let subtree = &((*arg2_type).parts[start_idx..end_idx]); (*expr_type).replace_subtree(start_idx, subtree); } progress_expr = true; progress_arg1 = true; } Ok((progress_expr, progress_arg1, progress_arg2)) } /// Applies equal constraint to N consecutive expressions. The returned /// `progress` vec will contain which expressions were progressed and will /// have length N. fn apply_equal_n_constraint( &mut self, ctx: &Ctx, outer_node_index: InferNodeIndex, arguments: &ScopedSection, progress: &mut ScopedSection ) -> Result<(), ParseError> { // Depending on the argument perform an early exit. This simplifies // later logic debug_assert_eq!(progress.len(), 0); match arguments.len() { 0 => { // nothing to progress return Ok(()) }, 1 => { // only one type, so nothing to infer progress.push(false); return Ok(()) }, n => { for _ in 0..n { progress.push(false); } } } // We'll start doing pairwise inference for all of the inference nodes // (node[0] with node[1], then node[1] with node[2], then node[2] ..., // etc.), so when we're at the end we have `node[N-1]` as the most // progressed type. let mut last_index_requiring_inference = 0; for prev_argument_index in 0..arguments.len() - 1 { let next_argument_index = prev_argument_index + 1; let prev_node_index = arguments[prev_argument_index]; let next_node_index = arguments[next_argument_index]; let (prev_progress, next_progress) = self.apply_equal2_constraint( ctx, outer_node_index, prev_node_index, 0, next_node_index, 0 )?; if prev_progress { // Previous node is progress, so every type in front of it needs // to be reinferred. progress[prev_argument_index] = true; last_index_requiring_inference = prev_argument_index; } progress[next_argument_index] = next_progress; } // Apply inference using the most progressed type (the last one) to the // ones that did not obtain this information during the inference // process. let last_argument_node_index = arguments[arguments.len() - 1]; let last_argument_type: *mut _ = &mut self.infer_nodes[last_argument_node_index].expr_type; for argument_index in 0..last_index_requiring_inference { // We can cheat, we know the LHS is less specific than the right // hand side, so: let argument_node_index = arguments[argument_index]; let argument_type = &mut self.infer_nodes[argument_node_index].expr_type; unsafe { // safety: we're dealing with different vectors, so cannot alias argument_type.replace_subtree(0, &(*last_argument_type).parts); } progress[argument_index] = true; } return Ok(()); } /// Determines the `InferenceType` for the expression based on the /// expression parent (this is not done if the parent is a regular 'ol /// expression). Expects `parent_index` to be set to the parent of the /// inference node that is created here. fn insert_initial_inference_node( &mut self, ctx: &mut Ctx, expr_id: ExpressionId ) -> Result { use ExpressionParent as EP; use InferenceTypePart as ITP; // Set the initial inference type based on the expression parent. let expr = &ctx.heap[expr_id]; let inference_type = match expr.parent() { EP::None => // Should have been set by linker unreachable!(), EP::Memory(_) | EP::ExpressionStmt(_) => // Determined during type inference InferenceType::new(false, false, vec![ITP::Unknown]), EP::Expression(parent_id, idx_in_parent) => { // If we are the test expression of a conditional expression, // then we must resolve to a boolean let is_conditional = if let Expression::Conditional(_) = &ctx.heap[*parent_id] { true } else { false }; if is_conditional && *idx_in_parent == 0 { InferenceType::new(false, true, vec![ITP::Bool]) } else { InferenceType::new(false, false, vec![ITP::Unknown]) } }, EP::If(_) | EP::While(_) => // Must be a boolean InferenceType::new(false, true, vec![ITP::Bool]), EP::Return(_) => { // Must match the return type of the function debug_assert_eq!(self.procedure_kind, ProcedureKind::Function); let returned = &ctx.heap[self.procedure_id].return_type.as_ref().unwrap(); self.determine_inference_type_from_parser_type_elements(&returned.elements, true) }, EP::New(_) => // Must be a component call, which we assign a "Void" return // type InferenceType::new(false, true, vec![ITP::Void]), }; let infer_index = self.infer_nodes.len() as InferNodeIndex; self.infer_nodes.push(InferenceNode { expr_type: inference_type, expr_id, inference_rule: InferenceRule::Noop, parent_index: self.parent_index, field_index: -1, poly_data_index: -1, info_type_id: TypeId::new_invalid(), info_variant: ExpressionInfoVariant::Generic, }); return Ok(infer_index); } fn insert_initial_call_polymorph_data( &mut self, ctx: &mut Ctx, call_id: CallExpressionId ) -> PolyDataIndex { // Note: the polymorph variables may be partially specified and may // contain references to the wrapping definition's (i.e. the proctype // we are currently visiting) polymorphic arguments. // // The arguments of the call may refer to polymorphic variables in the // definition of the function we're calling, not of the wrapping // definition. We insert markers in these inferred types to be able to // map them back and forth to the polymorphic arguments of the function // we are calling. let call = &ctx.heap[call_id]; // Handle the polymorphic arguments (if there are any) let num_poly_args = call.parser_type.elements[0].variant.num_embedded(); let mut poly_args = Vec::with_capacity(num_poly_args); for embedded_elements in call.parser_type.iter_embedded(0) { poly_args.push(self.determine_inference_type_from_parser_type_elements(embedded_elements, true)); } // Handle the arguments and return types let definition = &ctx.heap[call.procedure]; debug_assert_eq!(poly_args.len(), definition.poly_vars.len()); let mut parameter_types = Vec::with_capacity(definition.parameters.len()); let parameter_section = self.var_buffer.start_section_initialized(&definition.parameters); for parameter_id in parameter_section.iter_copied() { let param = &ctx.heap[parameter_id]; parameter_types.push(self.determine_inference_type_from_parser_type_elements(¶m.parser_type.elements, false)); } parameter_section.forget(); let return_type = match &definition.return_type { None => { // Component, so returns a "Void" debug_assert_ne!(definition.kind, ProcedureKind::Function); InferenceType::new(false, true, vec![InferenceTypePart::Void]) }, Some(returned) => { debug_assert_eq!(definition.kind, ProcedureKind::Function); self.determine_inference_type_from_parser_type_elements(&returned.elements, false) } }; let extra_data_idx = self.poly_data.len() as PolyDataIndex; self.poly_data.push(PolyData { first_rule_application: true, definition_id: call.procedure.upcast(), poly_vars: poly_args, expr_types: PolyDataTypes { associated: parameter_types, returned: return_type } }); return extra_data_idx } fn insert_initial_struct_polymorph_data( &mut self, ctx: &mut Ctx, lit_id: LiteralExpressionId, ) -> PolyDataIndex { use InferenceTypePart as ITP; let literal = ctx.heap[lit_id].value.as_struct(); // Handle polymorphic arguments let num_embedded = literal.parser_type.elements[0].variant.num_embedded(); let mut total_num_poly_parts = 0; let mut poly_args = Vec::with_capacity(num_embedded); for embedded_elements in literal.parser_type.iter_embedded(0) { let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true); total_num_poly_parts += poly_type.parts.len(); poly_args.push(poly_type); } // Handle parser types on struct definition let defined_type = ctx.types.get_base_definition(&literal.definition).unwrap(); let struct_type = defined_type.definition.as_struct(); debug_assert_eq!(poly_args.len(), defined_type.poly_vars.len()); // Note: programmer is capable of specifying fields in a struct literal // in a different order than on the definition. We take the literal- // specified order to be leading. let mut embedded_types = Vec::with_capacity(struct_type.fields.len()); for lit_field in literal.fields.iter() { let def_field = &struct_type.fields[lit_field.field_idx]; let inference_type = self.determine_inference_type_from_parser_type_elements(&def_field.parser_type.elements, false); embedded_types.push(inference_type); } // Return type is the struct type itself, with the appropriate // polymorphic variables. So: // - 1 part for definition // - N_poly_arg marker parts for each polymorphic argument // - all the parts for the currently known polymorphic arguments let parts_reserved = 1 + poly_args.len() + total_num_poly_parts; let mut parts = Vec::with_capacity(parts_reserved); parts.push(ITP::Instance(literal.definition, poly_args.len() as u32)); let mut return_type_done = true; for (poly_var_idx, poly_var) in poly_args.iter().enumerate() { if !poly_var.is_done { return_type_done = false; } parts.push(ITP::Marker(poly_var_idx as u32)); parts.extend(poly_var.parts.iter().cloned()); } debug_assert_eq!(parts.len(), parts_reserved); let return_type = InferenceType::new(!poly_args.is_empty(), return_type_done, parts); let extra_data_index = self.poly_data.len() as PolyDataIndex; self.poly_data.push(PolyData { first_rule_application: true, definition_id: literal.definition, poly_vars: poly_args, expr_types: PolyDataTypes { associated: embedded_types, returned: return_type, }, }); return extra_data_index } /// Inserts the extra polymorphic data struct for enum expressions. These /// can never be determined from the enum itself, but may be inferred from /// the use of the enum. fn insert_initial_enum_polymorph_data( &mut self, ctx: &Ctx, lit_id: LiteralExpressionId ) -> PolyDataIndex { use InferenceTypePart as ITP; let literal = ctx.heap[lit_id].value.as_enum(); // Handle polymorphic arguments to the enum let num_poly_args = literal.parser_type.elements[0].variant.num_embedded(); let mut total_num_poly_parts = 0; let mut poly_args = Vec::with_capacity(num_poly_args); for embedded_elements in literal.parser_type.iter_embedded(0) { let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true); total_num_poly_parts += poly_type.parts.len(); poly_args.push(poly_type); } // Handle enum type itself let parts_reserved = 1 + poly_args.len() + total_num_poly_parts; let mut parts = Vec::with_capacity(parts_reserved); parts.push(ITP::Instance(literal.definition, poly_args.len() as u32)); let mut enum_type_done = true; for (poly_var_idx, poly_var) in poly_args.iter().enumerate() { if !poly_var.is_done { enum_type_done = false; } parts.push(ITP::Marker(poly_var_idx as u32)); parts.extend(poly_var.parts.iter().cloned()); } debug_assert_eq!(parts.len(), parts_reserved); let enum_type = InferenceType::new(!poly_args.is_empty(), enum_type_done, parts); let extra_data_index = self.poly_data.len() as PolyDataIndex; self.poly_data.push(PolyData { first_rule_application: true, definition_id: literal.definition, poly_vars: poly_args, expr_types: PolyDataTypes { associated: Vec::new(), returned: enum_type, }, }); return extra_data_index; } /// Inserts the extra polymorphic data struct for unions. The polymorphic /// arguments may be partially determined from embedded values in the union. fn insert_initial_union_polymorph_data( &mut self, ctx: &Ctx, lit_id: LiteralExpressionId ) -> PolyDataIndex { use InferenceTypePart as ITP; let literal = ctx.heap[lit_id].value.as_union(); // Construct the polymorphic variables let num_poly_args = literal.parser_type.elements[0].variant.num_embedded(); let mut total_num_poly_parts = 0; let mut poly_args = Vec::with_capacity(num_poly_args); for embedded_elements in literal.parser_type.iter_embedded(0) { let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true); total_num_poly_parts += poly_type.parts.len(); poly_args.push(poly_type); } // Handle any of the embedded values in the variant, if specified let definition_id = literal.definition; let type_definition = ctx.types.get_base_definition(&definition_id).unwrap(); let union_definition = type_definition.definition.as_union(); debug_assert_eq!(poly_args.len(), type_definition.poly_vars.len()); let variant_definition = &union_definition.variants[literal.variant_idx]; debug_assert_eq!(variant_definition.embedded.len(), literal.values.len()); let mut embedded = Vec::with_capacity(variant_definition.embedded.len()); for embedded_parser_type in &variant_definition.embedded { let inference_type = self.determine_inference_type_from_parser_type_elements(&embedded_parser_type.elements, false); embedded.push(inference_type); } // Handle the type of the union itself let parts_reserved = 1 + poly_args.len() + total_num_poly_parts; let mut parts = Vec::with_capacity(parts_reserved); parts.push(ITP::Instance(definition_id, poly_args.len() as u32)); let mut union_type_done = true; for (poly_var_idx, poly_var) in poly_args.iter().enumerate() { if !poly_var.is_done { union_type_done = false; } parts.push(ITP::Marker(poly_var_idx as u32)); parts.extend(poly_var.parts.iter().cloned()); } debug_assert_eq!(parts_reserved, parts.len()); let union_type = InferenceType::new(!poly_args.is_empty(), union_type_done, parts); let extra_data_index = self.poly_data.len() as isize; self.poly_data.push(PolyData { first_rule_application: true, definition_id: literal.definition, poly_vars: poly_args, expr_types: PolyDataTypes { associated: embedded, returned: union_type, }, }); return extra_data_index; } /// Inserts the extra polymorphic data struct. Assumes that the select /// expression's referenced (definition_id, field_idx) has been resolved. fn insert_initial_select_polymorph_data( &mut self, ctx: &Ctx, node_index: InferNodeIndex, struct_def_id: DefinitionId ) -> PolyDataIndex { use InferenceTypePart as ITP; let definition = ctx.heap[struct_def_id].as_struct(); let node = &self.infer_nodes[node_index]; let field_index = node.field_index as usize; // Generate initial polyvar types and struct type // TODO: @Performance: we can immediately set the polyvars of the subject's struct type let num_poly_vars = definition.poly_vars.len(); let mut poly_vars = Vec::with_capacity(num_poly_vars); let struct_parts_reserved = 1 + 2 * num_poly_vars; let mut struct_parts = Vec::with_capacity(struct_parts_reserved); struct_parts.push(ITP::Instance(struct_def_id, num_poly_vars as u32)); for poly_idx in 0..num_poly_vars { poly_vars.push(InferenceType::new(true, false, vec![ ITP::Marker(poly_idx as u32), ITP::Unknown, ])); struct_parts.push(ITP::Marker(poly_idx as u32)); struct_parts.push(ITP::Unknown); } debug_assert_eq!(struct_parts.len(), struct_parts_reserved); // Generate initial field type let field_type = self.determine_inference_type_from_parser_type_elements(&definition.fields[field_index].parser_type.elements, false); let extra_data_index = self.poly_data.len() as PolyDataIndex; self.poly_data.push(PolyData { first_rule_application: true, definition_id: struct_def_id, poly_vars, expr_types: PolyDataTypes { associated: vec![InferenceType::new(num_poly_vars != 0, num_poly_vars == 0, struct_parts)], returned: field_type, }, }); return extra_data_index; } /// Determines the initial InferenceType from the provided ParserType. This /// may be called with two kinds of intentions: /// 1. To resolve a ParserType within the body of a function, or on /// polymorphic arguments to calls/instantiations within that body. This /// means that the polymorphic variables are known and can be replaced /// with the monomorph we're instantiating. /// 2. To resolve a ParserType on a called function's definition or on /// an instantiated datatype's members. This means that the polymorphic /// arguments inside those ParserTypes refer to the polymorphic /// variables in the called/instantiated type's definition. /// In the second case we place InferenceTypePart::Marker instances such /// that we can perform type inference on the polymorphic variables. fn determine_inference_type_from_parser_type_elements( &mut self, elements: &[ParserTypeElement], use_definitions_known_poly_args: bool ) -> InferenceType { use ParserTypeVariant as PTV; use InferenceTypePart as ITP; let mut infer_type = Vec::with_capacity(elements.len()); let mut has_inferred = false; let mut has_markers = false; for element in elements { match &element.variant { // Compiler-only types PTV::Void => { infer_type.push(ITP::Void); }, PTV::InputOrOutput => { infer_type.push(ITP::PortLike); has_inferred = true }, PTV::ArrayLike => { infer_type.push(ITP::ArrayLike); has_inferred = true }, PTV::IntegerLike => { infer_type.push(ITP::IntegerLike); has_inferred = true }, // Builtins PTV::Message => { // TODO: @types Remove the Message -> Byte hack at some point... infer_type.push(ITP::Message); infer_type.push(ITP::UInt8); }, PTV::Bool => { infer_type.push(ITP::Bool); }, PTV::UInt8 => { infer_type.push(ITP::UInt8); }, PTV::UInt16 => { infer_type.push(ITP::UInt16); }, PTV::UInt32 => { infer_type.push(ITP::UInt32); }, PTV::UInt64 => { infer_type.push(ITP::UInt64); }, PTV::SInt8 => { infer_type.push(ITP::SInt8); }, PTV::SInt16 => { infer_type.push(ITP::SInt16); }, PTV::SInt32 => { infer_type.push(ITP::SInt32); }, PTV::SInt64 => { infer_type.push(ITP::SInt64); }, PTV::Character => { infer_type.push(ITP::Character); }, PTV::String => { infer_type.push(ITP::String); infer_type.push(ITP::Character); }, // Special markers PTV::IntegerLiteral => { unreachable!("integer literal type on variable type"); }, PTV::Inferred => { infer_type.push(ITP::Unknown); has_inferred = true; }, // With nested types PTV::Array => { infer_type.push(ITP::Array); }, PTV::Input => { infer_type.push(ITP::Input); }, PTV::Output => { infer_type.push(ITP::Output); }, PTV::Tuple(num_embedded) => { infer_type.push(ITP::Tuple(*num_embedded)); }, PTV::PolymorphicArgument(belongs_to_definition, poly_arg_idx) => { let poly_arg_idx = *poly_arg_idx; if use_definitions_known_poly_args { // Refers to polymorphic argument on procedure we're currently processing. // This argument is already known. debug_assert_eq!(*belongs_to_definition, self.procedure_id.upcast()); debug_assert!((poly_arg_idx as usize) < self.poly_vars.len()); Self::determine_inference_type_from_concrete_type( &mut infer_type, &self.poly_vars[poly_arg_idx as usize].parts ); } else { // Polymorphic argument has to be inferred has_markers = true; has_inferred = true; infer_type.push(ITP::Marker(poly_arg_idx)); infer_type.push(ITP::Unknown) } }, PTV::Definition(definition_id, num_embedded) => { infer_type.push(ITP::Instance(*definition_id, *num_embedded)); } } } InferenceType::new(has_markers, !has_inferred, infer_type) } /// Determines the inference type from an already concrete type. Applies the /// various type "hacks" inside the type inferencer. fn determine_inference_type_from_concrete_type(parser_type: &mut Vec, concrete_type: &[ConcreteTypePart]) { use InferenceTypePart as ITP; use ConcreteTypePart as CTP; for concrete_part in concrete_type { match concrete_part { CTP::Void => parser_type.push(ITP::Void), CTP::Message => { parser_type.push(ITP::Message); parser_type.push(ITP::UInt8) }, CTP::Bool => parser_type.push(ITP::Bool), CTP::UInt8 => parser_type.push(ITP::UInt8), CTP::UInt16 => parser_type.push(ITP::UInt16), CTP::UInt32 => parser_type.push(ITP::UInt32), CTP::UInt64 => parser_type.push(ITP::UInt64), CTP::SInt8 => parser_type.push(ITP::SInt8), CTP::SInt16 => parser_type.push(ITP::SInt16), CTP::SInt32 => parser_type.push(ITP::SInt32), CTP::SInt64 => parser_type.push(ITP::SInt64), CTP::Character => parser_type.push(ITP::Character), CTP::String => { parser_type.push(ITP::String); parser_type.push(ITP::Character) }, CTP::Array => parser_type.push(ITP::Array), CTP::Slice => parser_type.push(ITP::Slice), CTP::Input => parser_type.push(ITP::Input), CTP::Output => parser_type.push(ITP::Output), CTP::Pointer => unreachable!("pointer type during concrete to inference type conversion"), CTP::Tuple(num) => parser_type.push(ITP::Tuple(*num)), CTP::Instance(id, num) => parser_type.push(ITP::Instance(*id, *num)), CTP::Function(_, _) => unreachable!("function type during concrete to inference type conversion"), CTP::Component(_, _) => unreachable!("component type during concrete to inference type conversion"), } } } /// Construct an error when an expression's type does not match. This /// happens if we infer the expression type from its arguments (e.g. the /// expression type of an addition operator is the type of the arguments) /// But the expression type was already set due to our parent (e.g. an /// "if statement" or a "logical not" always expecting a boolean) fn construct_expr_type_error( &self, ctx: &Ctx, expr_index: InferNodeIndex, arg_index: InferNodeIndex ) -> ParseError { // TODO: Expand and provide more meaningful information for humans let expr_node = &self.infer_nodes[expr_index]; let arg_node = &self.infer_nodes[arg_index]; let expr = &ctx.heap[expr_node.expr_id]; let arg = &ctx.heap[arg_node.expr_id]; return ParseError::new_error_at_span( &ctx.module().source, expr.operation_span(), format!( "incompatible types: this expression expected a '{}'", expr_node.expr_type.display_name(&ctx.heap) ) ).with_info_at_span( &ctx.module().source, arg.full_span(), format!( "but this expression yields a '{}'", arg_node.expr_type.display_name(&ctx.heap) ) ) } fn construct_arg_type_error( &self, ctx: &Ctx, expr_index: InferNodeIndex, arg1_index: InferNodeIndex, arg2_index: InferNodeIndex ) -> ParseError { let arg1_node = &self.infer_nodes[arg1_index]; let arg2_node = &self.infer_nodes[arg2_index]; let expr_id = self.infer_nodes[expr_index].expr_id; let expr = &ctx.heap[expr_id]; let arg1 = &ctx.heap[arg1_node.expr_id]; let arg2 = &ctx.heap[arg2_node.expr_id]; return ParseError::new_error_str_at_span( &ctx.module().source, expr.operation_span(), "incompatible types: cannot apply this expression" ).with_info_at_span( &ctx.module().source, arg1.full_span(), format!( "Because this expression has type '{}'", arg1_node.expr_type.display_name(&ctx.heap) ) ).with_info_at_span( &ctx.module().source, arg2.full_span(), format!( "But this expression has type '{}'", arg2_node.expr_type.display_name(&ctx.heap) ) ) } fn construct_template_type_error( &self, ctx: &Ctx, node_index: InferNodeIndex, template: &[InferenceTypePart] ) -> ParseError { let node = &self.infer_nodes[node_index]; let expr = &ctx.heap[node.expr_id]; let expr_type = &node.expr_type; return ParseError::new_error_at_span( &ctx.module().source, expr.full_span(), format!( "incompatible types: got a '{}' but expected a '{}'", expr_type.display_name(&ctx.heap), InferenceType::partial_display_name(&ctx.heap, template) ) ) } fn construct_variable_type_error( &self, ctx: &Ctx, node_index: InferNodeIndex, ) -> ParseError { let node = &self.infer_nodes[node_index]; let rule = node.inference_rule.as_variable_expr(); let var_data = &self.var_data[rule.var_data_index]; let var_decl = &ctx.heap[var_data.var_id]; let var_expr = &ctx.heap[node.expr_id]; return ParseError::new_error_at_span( &ctx.module().source, var_decl.identifier.span, format!( "conflicting types for this variable, previously assigned the type '{}'", var_data.var_type.display_name(&ctx.heap) ) ).with_info_at_span( &ctx.module().source, var_expr.full_span(), format!( "but inferred to have incompatible type '{}' here", node.expr_type.display_name(&ctx.heap) ) ); } /// Constructs a human interpretable error in the case that type inference /// on a polymorphic variable to a function call or literal construction /// failed. This may only be caused by a pair of inference types (which may /// come from arguments or the return type) having two different inferred /// values for that polymorphic variable. /// /// So we find this pair and construct the error using it. /// /// We assume that the expression is a function call or a struct literal, /// and that an actual error has occurred. fn construct_poly_arg_error( ctx: &Ctx, poly_data: &PolyData, expr_id: ExpressionId ) -> ParseError { // Helper function to check for polymorph mismatch between two inference // types. fn has_poly_mismatch<'a>(type_a: &'a InferenceType, type_b: &'a InferenceType) -> Option<(u32, &'a [InferenceTypePart], &'a [InferenceTypePart])> { if !type_a.has_marker || !type_b.has_marker { return None } for (marker_a, section_a) in type_a.marker_iter() { for (marker_b, section_b) in type_b.marker_iter() { if marker_a != marker_b { // Not the same polymorphic variable continue; } if !InferenceType::check_subtrees(section_a, 0, section_b, 0) { // Not compatible return Some((marker_a, section_a, section_b)) } } } None } // Helper function to check for polymorph mismatch between an inference // type and the polymorphic variables in the poly_data struct. fn has_explicit_poly_mismatch<'a>( poly_vars: &'a [InferenceType], arg: &'a InferenceType ) -> Option<(u32, &'a [InferenceTypePart], &'a [InferenceTypePart])> { for (marker, section) in arg.marker_iter() { debug_assert!((marker as usize) < poly_vars.len()); let poly_section = &poly_vars[marker as usize].parts; if !InferenceType::check_subtrees(poly_section, 0, section, 0) { return Some((marker, poly_section, section)) } } None } // Helpers function to retrieve polyvar name and definition name fn get_poly_var_and_definition_name<'a>(ctx: &'a Ctx, poly_var_idx: u32, definition_id: DefinitionId) -> (&'a str, &'a str) { let definition = &ctx.heap[definition_id]; let poly_var = definition.poly_vars()[poly_var_idx as usize].value.as_str(); let func_name = definition.identifier().value.as_str(); (poly_var, func_name) } // Helper function to construct initial error fn construct_main_error(ctx: &Ctx, poly_data: &PolyData, poly_var_idx: u32, expr: &Expression) -> ParseError { match expr { Expression::Call(expr) => { let (poly_var, func_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id); return ParseError::new_error_at_span( &ctx.module().source, expr.func_span, format!( "Conflicting type for polymorphic variable '{}' of '{}'", poly_var, func_name ) ) }, Expression::Literal(expr) => { let (poly_var, type_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id); return ParseError::new_error_at_span( &ctx.module().source, expr.span, format!( "Conflicting type for polymorphic variable '{}' of instantiation of '{}'", poly_var, type_name ) ); }, Expression::Select(expr) => { let (poly_var, struct_name) = get_poly_var_and_definition_name(ctx, poly_var_idx, poly_data.definition_id); let field_name = match &expr.kind { SelectKind::StructField(v) => v, SelectKind::TupleMember(_) => unreachable!(), // because we're constructing a polymorph error, and tuple access does not deal with polymorphs }; return ParseError::new_error_at_span( &ctx.module().source, expr.full_span, format!( "Conflicting type for polymorphic variable '{}' while accessing field '{}' of '{}'", poly_var, field_name.value.as_str(), struct_name ) ) } _ => unreachable!("called construct_poly_arg_error without an expected expression, got: {:?}", expr) } } // Actual checking let expr = &ctx.heap[expr_id]; let (expr_args, expr_return_name) = match expr { Expression::Call(expr) => ( expr.arguments.clone(), "return type" ), Expression::Literal(expr) => { let expressions = match &expr.value { Literal::Struct(v) => v.fields.iter() .map(|f| f.value) .collect(), Literal::Enum(_) => Vec::new(), Literal::Union(v) => v.values.clone(), _ => unreachable!() }; ( expressions, "literal" ) }, Expression::Select(expr) => // Select expression uses the polymorphic variables of the // struct it is accessing, so get the subject expression. ( vec![expr.subject], "selected field" ), _ => unreachable!(), }; // - check return type with itself if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch( &poly_data.expr_types.returned, &poly_data.expr_types.returned ) { return construct_main_error(ctx, poly_data, poly_idx, expr) .with_info_at_span( &ctx.module().source, expr.full_span(), format!( "The {} inferred the conflicting types '{}' and '{}'", expr_return_name, InferenceType::partial_display_name(&ctx.heap, section_a), InferenceType::partial_display_name(&ctx.heap, section_b) ) ); } // - check arguments with each other argument and with return type for (arg_a_idx, arg_a) in poly_data.expr_types.associated.iter().enumerate() { for (arg_b_idx, arg_b) in poly_data.expr_types.associated.iter().enumerate() { if arg_b_idx > arg_a_idx { break; } if let Some((poly_idx, section_a, section_b)) = has_poly_mismatch(&arg_a, &arg_b) { let error = construct_main_error(ctx, poly_data, poly_idx, expr); if arg_a_idx == arg_b_idx { // Same argument let arg = &ctx.heap[expr_args[arg_a_idx]]; return error.with_info_at_span( &ctx.module().source, arg.full_span(), format!( "This argument inferred the conflicting types '{}' and '{}'", InferenceType::partial_display_name(&ctx.heap, section_a), InferenceType::partial_display_name(&ctx.heap, section_b) ) ); } else { let arg_a = &ctx.heap[expr_args[arg_a_idx]]; let arg_b = &ctx.heap[expr_args[arg_b_idx]]; return error.with_info_at_span( &ctx.module().source, arg_a.full_span(), format!( "This argument inferred it to '{}'", InferenceType::partial_display_name(&ctx.heap, section_a) ) ).with_info_at_span( &ctx.module().source, arg_b.full_span(), format!( "While this argument inferred it to '{}'", InferenceType::partial_display_name(&ctx.heap, section_b) ) ) } } } // Check with return type if let Some((poly_idx, section_arg, section_ret)) = has_poly_mismatch(arg_a, &poly_data.expr_types.returned) { let arg = &ctx.heap[expr_args[arg_a_idx]]; return construct_main_error(ctx, poly_data, poly_idx, expr) .with_info_at_span( &ctx.module().source, arg.full_span(), format!( "This argument inferred it to '{}'", InferenceType::partial_display_name(&ctx.heap, section_arg) ) ) .with_info_at_span( &ctx.module().source, expr.full_span(), format!( "While the {} inferred it to '{}'", expr_return_name, InferenceType::partial_display_name(&ctx.heap, section_ret) ) ); } } // Now check against the explicitly specified polymorphic variables (if // any). for (arg_idx, arg) in poly_data.expr_types.associated.iter().enumerate() { if let Some((poly_idx, poly_section, arg_section)) = has_explicit_poly_mismatch(&poly_data.poly_vars, arg) { let arg = &ctx.heap[expr_args[arg_idx]]; return construct_main_error(ctx, poly_data, poly_idx, expr) .with_info_at_span( &ctx.module().source, arg.full_span(), format!( "The polymorphic variable has type '{}' (which might have been partially inferred) while the argument inferred it to '{}'", InferenceType::partial_display_name(&ctx.heap, poly_section), InferenceType::partial_display_name(&ctx.heap, arg_section) ) ); } } if let Some((poly_idx, poly_section, ret_section)) = has_explicit_poly_mismatch(&poly_data.poly_vars, &poly_data.expr_types.returned) { return construct_main_error(ctx, poly_data, poly_idx, expr) .with_info_at_span( &ctx.module().source, expr.full_span(), format!( "The polymorphic variable has type '{}' (which might have been partially inferred) while the {} inferred it to '{}'", InferenceType::partial_display_name(&ctx.heap, poly_section), expr_return_name, InferenceType::partial_display_name(&ctx.heap, ret_section) ) ) } unreachable!("construct_poly_arg_error without actual error found?") } } fn get_tuple_size_from_inference_type(inference_type: &InferenceType) -> Result, ()> { for part in &inference_type.parts { if part.is_marker() { continue; } if !part.is_concrete() { break; } if let InferenceTypePart::Tuple(size) = part { return Ok(Some(*size)); } else { return Err(()); // not a tuple! } } return Ok(None); } #[cfg(test)] mod tests { use super::*; use crate::protocol::arena::Id; use InferenceTypePart as ITP; use InferenceType as IT; #[test] fn test_single_part_inference() { // lhs argument inferred from rhs let pairs = [ (ITP::NumberLike, ITP::UInt8), (ITP::IntegerLike, ITP::SInt32), (ITP::Unknown, ITP::UInt64), (ITP::Unknown, ITP::Bool) ]; for (lhs, rhs) in pairs.iter() { // Using infer-both let mut lhs_type = IT::new(false, false, vec![lhs.clone()]); let mut rhs_type = IT::new(false, true, vec![rhs.clone()]); let result = unsafe{ IT::infer_subtrees_for_both_types( &mut lhs_type, 0, &mut rhs_type, 0 ) }; assert_eq!(DualInferenceResult::First, result); assert_eq!(lhs_type.parts, rhs_type.parts); // Using infer-single let mut lhs_type = IT::new(false, false, vec![lhs.clone()]); let rhs_type = IT::new(false, true, vec![rhs.clone()]); let result = IT::infer_subtree_for_single_type( &mut lhs_type, 0, &rhs_type.parts, 0, false ); assert_eq!(SingleInferenceResult::Modified, result); assert_eq!(lhs_type.parts, rhs_type.parts); } } #[test] fn test_multi_part_inference() { let pairs = [ (vec![ITP::ArrayLike, ITP::NumberLike], vec![ITP::Slice, ITP::SInt8]), (vec![ITP::Unknown], vec![ITP::Input, ITP::Array, ITP::String, ITP::Character]), (vec![ITP::PortLike, ITP::SInt32], vec![ITP::Input, ITP::SInt32]), (vec![ITP::Unknown], vec![ITP::Output, ITP::SInt32]), ( vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Unknown, ITP::Output, ITP::Unknown], vec![ITP::Instance(Id::new(0), 2), ITP::Input, ITP::Array, ITP::SInt32, ITP::Output, ITP::SInt32] ) ]; for (lhs, rhs) in pairs.iter() { let mut lhs_type = IT::new(false, false, lhs.clone()); let mut rhs_type = IT::new(false, true, rhs.clone()); let result = unsafe{ IT::infer_subtrees_for_both_types( &mut lhs_type, 0, &mut rhs_type, 0 ) }; assert_eq!(DualInferenceResult::First, result); assert_eq!(lhs_type.parts, rhs_type.parts); let mut lhs_type = IT::new(false, false, lhs.clone()); let rhs_type = IT::new(false, true, rhs.clone()); let result = IT::infer_subtree_for_single_type( &mut lhs_type, 0, &rhs_type.parts, 0, false ); assert_eq!(SingleInferenceResult::Modified, result); assert_eq!(lhs_type.parts, rhs_type.parts) } } }