Changeset - 2df3ee19d578
MH <contact@maxhenger.nl>, 2021-12-16 11:34:22
Remove more hot-loop allocations
3 files changed with 101 insertions and 58 deletions:
src/collections/scoped_buffer.rs
 /// scoped_buffer.rs
 ///
 /// Solves the common pattern where we are performing some kind of recursive
 /// pattern while using a temporary buffer. At the start, or during the
 /// procedure, we push stuff into the buffer. At the end we take out what we
 /// have put in.
 ///
 /// It is unsafe because we're using pointers to circumvent borrowing rules in
 /// the name of code cleanliness. The correctness of use is checked in debug
-/// mode.
+/// mode at runtime.

 use std::iter::FromIterator;

-pub(crate) struct ScopedBuffer<T: Sized> {
-    pub inner: Vec<T>,
+macro_rules! hide {
+    ($v:block) => {
+        #[cfg(debug_assertions)] $v
+    };
+    ($v:expr) => {
+        #[cfg(debug_assertions)] $v
+    };
+}
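Review note: the new `hide!` macro only emits its argument when `debug_assertions` is enabled, so the debug bookkeeping below disappears from release builds. A minimal standalone sketch of how the call sites use it (the toy `main` is invented, the macro is as in this changeset):

    macro_rules! hide {
        ($v:block) => { #[cfg(debug_assertions)] $v };
        ($v:expr) => { #[cfg(debug_assertions)] $v };
    }

    fn main() {
        let mut debug_counter = 0u32;
        hide!(debug_counter += 1);                        // compiled out in release builds
        hide!({ println!("debug count = {}", debug_counter); });
    }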
 

	
 
-/// A section of the buffer. Keeps track of where we started the section. When
-/// done with the section one must call `into_vec` or `forget` to remove the
-/// section from the underlying buffer. This will also be done upon dropping the
-/// ScopedSection in case errors are being handled.
-pub(crate) struct ScopedSection<T: Sized> {
-    inner: *mut Vec<T>,
-    start_size: u32,
-    #[cfg(debug_assertions)] cur_size: u32,
+pub(crate) struct ScopedBuffer<T: Sized> {
+    pub inner: Vec<T>,
 }

 impl<T: Sized> ScopedBuffer<T> {
     pub(crate) fn with_capacity(capacity: usize) -> Self {
-        Self { inner: Vec::with_capacity(capacity) }
+        Self {
+            inner: Vec::with_capacity(capacity),
+        }
     }

     pub(crate) fn start_section(&mut self) -> ScopedSection<T> {
         let start_size = self.inner.len() as u32;
         ScopedSection {
             inner: &mut self.inner,
             start_size,
-            #[cfg(debug_assertions)] cur_size: start_size
+            #[cfg(debug_assertions)] cur_size: start_size,
         }
     }
 }

 impl<T: Clone> ScopedBuffer<T> {
     pub(crate) fn start_section_initialized(&mut self, initialize_with: &[T]) -> ScopedSection<T> {
         let start_size = self.inner.len() as u32;
         let _data_size = initialize_with.len() as u32;
         self.inner.extend_from_slice(initialize_with);
         ScopedSection{
             inner: &mut self.inner,
             start_size,
             #[cfg(debug_assertions)] cur_size: start_size + _data_size,
         }
     }
 }

 #[cfg(debug_assertions)]
 impl<T: Sized> Drop for ScopedBuffer<T> {
     fn drop(&mut self) {
         // Make sure that everyone cleaned up the buffer neatly
         debug_assert!(self.inner.is_empty(), "dropped non-empty scoped buffer");
     }
 }

+/// A section of the buffer. Keeps track of where we started the section. When
+/// done with the section one must call `into_vec` or `forget` to remove the
+/// section from the underlying buffer. This will also be done upon dropping the
+/// ScopedSection in case errors are being handled.
+pub(crate) struct ScopedSection<T: Sized> {
+    inner: *mut Vec<T>,
+    start_size: u32,
+    #[cfg(debug_assertions)] cur_size: u32,
+}

 impl<T: Sized> ScopedSection<T> {
     #[inline]
     pub(crate) fn push(&mut self, value: T) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to push onto section, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to push onto section, but size is larger than expected"
+        ));
         vec.push(value);
-        #[cfg(debug_assertions)] { self.cur_size += 1; }
+        hide!(self.cur_size += 1);
     }

     #[inline]
     pub(crate) fn len(&self) -> usize {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to get section length, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to get section length, but size is larger than expected"
+        ));
         return vec.len() - self.start_size as usize;
     }

     #[inline]
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn forget(mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to forget section, but size is larger than expected"
             );
             self.cur_size = self.start_size;
-        }
+        });
         vec.truncate(self.start_size as usize);
     }

     #[inline]
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn into_vec(mut self) -> Vec<T> {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to turn section into vec, but size is larger than expected"
             );
             self.cur_size = self.start_size;
-        }
+        });
         let section = Vec::from_iter(vec.drain(self.start_size as usize..));
         section
     }
 }
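For review context, the intended lifecycle of a section, sketched against the API above (a hypothetical caller, not code from this changeset): start a section, push into it during the recursive procedure, then end it with `into_vec` or `forget`:

    fn collect_even(buffer: &mut ScopedBuffer<u32>, values: &[u32]) -> Vec<u32> {
        let mut section = buffer.start_section(); // remembers the current buffer length
        for v in values {
            if v % 2 == 0 {
                section.push(*v); // appends to the shared backing Vec
            }
        }
        // Drains exactly this section's elements and restores the buffer to its
        // starting length, so sections of enclosing recursion levels stay valid.
        section.into_vec()
    }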
 

	
 
+impl<T: Copy> ScopedSection<T> {
+    pub(crate) fn iter_copied(&self) -> ScopedIter<T> {
+        return ScopedIter{
+            inner: self.inner,
+            cur_index: self.start_size,
+            last_index: unsafe{ (*self.inner).len() as u32 },
+        }
+    }
+}

-impl<T: Sized> std::ops::Index<usize> for ScopedSection<T> {
+impl<T> std::ops::Index<usize> for ScopedSection<T> {
     type Output = T;

-    fn index(&self, idx: usize) -> &Self::Output {
+    fn index(&self, index: usize) -> &Self::Output {
         let vec = unsafe{&*self.inner};
-        return &vec[self.start_size as usize + idx]
+        return &vec[self.start_size as usize + index]
     }
 }

+impl<T> std::ops::IndexMut<usize> for ScopedSection<T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        let vec = unsafe{&mut *self.inner};
+        return &mut vec[self.start_size as usize + index]
+    }
+}

 #[cfg(debug_assertions)]
 impl<T: Sized> Drop for ScopedSection<T> {
     fn drop(&mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize);
+        hide!(debug_assert_eq!(vec.len(), self.cur_size as usize));
         vec.truncate(self.start_size as usize);
     }
 }

+/// Small utility for iterating over a section of the buffer. Same conditions as
+/// the buffer apply: each time we retrieve an element the buffer must have the
+/// same size as the moment of creation.
+pub(crate) struct ScopedIter<T: Copy> {
+    inner: *mut Vec<T>,
+    cur_index: u32,
+    last_index: u32,
+}

+impl<T: Copy> Iterator for ScopedIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        hide!(debug_assert_eq!(self.last_index as usize, unsafe { (*self.inner).len() }));
+        if self.cur_index >= self.last_index {
+            return None;
+        }
+
+        let vec = unsafe{ &*self.inner };
+        let index = self.cur_index as usize;
+        self.cur_index += 1;
+        return Some(vec[index]);
+    }
+}
 
\ No newline at end of file
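The new `iter_copied`/`ScopedIter` pair is what the call sites in the two parser passes below migrate to: it hands elements out by value, so hot loops no longer need the `data.clone()` pattern this commit removes. A small hypothetical example against the API above:

    fn sum_ids(buffer: &mut ScopedBuffer<u32>, ids: &[u32]) -> u32 {
        let section = buffer.start_section_initialized(ids);
        let mut sum = 0;
        for id in section.iter_copied() { // copies elements out; no Vec allocation
            sum += id;
        }
        section.forget(); // truncate the buffer back before the section drops
        sum
    }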
src/protocol/parser/pass_typing.rs
@@ -4,97 +4,97 @@
 /// applying constraints on (sub)trees of types. During this process the
 /// resolver takes the `ParserType` structs (the representation of the types
 /// written by the programmer), converts them to `InferenceType` structs (the
 /// temporary data structure used during type inference) and attempts to arrive
 /// at `ConcreteType` structs (the representation of a fully checked and
 /// validated type).
 ///
 /// The resolver will visit every statement and expression relevant to the
 /// procedure and insert and determine its initial type based on context (e.g. a
 /// return statement's expression must match the function's return type, an
 /// if statement's test expression must evaluate to a boolean). When all are
 /// visited we attempt to make progress in evaluating the types. Whenever a type
 /// is progressed we queue the related expressions for further type progression.
 /// Once no more expressions are in the queue the algorithm is finished. At this
 /// point either all types are inferred (or can be trivially implicitly
 /// determined), or we have incomplete types. In the latter case we return an
 /// error.
 ///
 /// TODO: Needs a thorough rewrite:
 ///  0. polymorph_progress is intentionally broken at the moment. Make it work
 ///     again and use a normal VecSomething.
 ///  1. The foundation for doing all of the work with predetermined indices
 ///     instead of with HashMaps is there, but it is not really used because of
 ///     time constraints. When time is available, rewrite the system such that
 ///     AST IDs are not needed, and only indices into arrays are used.
 ///  2. We're doing a lot of extra work. It seems better to apply the initial
 ///     type based on expression parents, and immediately apply forced
 ///     constraints (arg to a fires() call must be port-like). All of the
 ///     progress_xxx calls should then only be concerned with "transmitting"
 ///     type inference across their parent/child expressions.
 ///  3. Remove the `msg` type?
 ///  4. Disallow certain types in certain operations (e.g. `Void`).
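The queue-driven fixpoint the doc comment describes is a standard worklist loop. A minimal sketch (names invented; the real pass stores expression indices in a `DequeSet`):

    use std::collections::VecDeque;

    fn run_to_fixpoint(mut queue: VecDeque<usize>, mut progress_one: impl FnMut(usize) -> Vec<usize>) {
        while let Some(expr_idx) = queue.pop_front() {
            for affected in progress_one(expr_idx) {
                queue.push_back(affected); // re-check everything our progress touched
            }
        }
        // Queue drained: every type is inferred (or trivially defaulted),
        // otherwise the caller reports the remaining incomplete types as an error.
    }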
 

	
 
 macro_rules! debug_log_enabled {
     () => { false };
 }

 macro_rules! debug_log {
     ($format:literal) => {
         enabled_debug_print!(false, "types", $format);
     };
     ($format:literal, $($args:expr),*) => {
         enabled_debug_print!(false, "types", $format, $($args),*);
     };
 }

 use std::collections::{HashMap, HashSet};

-use crate::collections::{ScopedBuffer, DequeSet};
+use crate::collections::{ScopedBuffer, ScopedSection, DequeSet};
 use crate::protocol::ast::*;
 use crate::protocol::input_source::ParseError;
 use crate::protocol::parser::ModuleCompilationPhase;
 use crate::protocol::parser::type_table::*;
 use crate::protocol::parser::token_parsing::*;
 use super::visitor::{
     BUFFER_INIT_CAPACITY,
     Ctx,
     Visitor,
     VisitorResult
 };

 const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 const STRING_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::String, InferenceTypePart::Character ];
 const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 const SLICE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Slice, InferenceTypePart::Unknown ];
 const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];

 /// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 #[derive(Debug, Clone, Eq, PartialEq)]
 pub(crate) enum InferenceTypePart {
     // When we infer types of AST elements that support polymorphic arguments,
     // then we might have the case that multiple embedded types depend on the
     // polymorphic type (e.g. func bla(T a, T[] b) -> T[][]). If we can infer
     // the type in one place (e.g. argument a), then we may propagate this
     // information to other types (e.g. argument b and the return type). For
     // this reason we place markers in the `InferenceType` instances such that
     // we know which part of the type was originally a polymorphic argument.
     Marker(u32),
     // Completely unknown type, needs to be inferred
     Unknown,
     // Partially known type, may be inferred to be the appropriate related
     // type.
     // IndexLike,      // index into array/slice
     NumberLike,     // any kind of integer/float
     IntegerLike,    // any kind of integer
     ArrayLike,      // array or slice. Note that this must have a subtype
     PortLike,       // input or output port
     // Special types that cannot be instantiated by the user
     Void, // For builtin functions that do not return anything
     // Concrete types without subtypes
     Bool,
     UInt8,
 
@@ -812,162 +812,164 @@ enum DefinitionType{
 }

 impl DefinitionType {
     fn definition_id(&self) -> DefinitionId {
         match self {
             DefinitionType::Component(v) => v.upcast(),
             DefinitionType::Function(v) => v.upcast(),
         }
     }
 }

 pub(crate) struct ResolveQueueElement {
     // Note that using the `definition_id` and the `monomorph_idx` one may
     // query the type table for the full procedure type, thereby retrieving
     // the polymorphic arguments to the procedure.
     pub(crate) root_id: RootId,
     pub(crate) definition_id: DefinitionId,
     pub(crate) reserved_monomorph_idx: i32,
 }

 pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;

 #[derive(Clone)]
 struct InferenceExpression {
     expr_type: InferenceType,       // result type from expression
     expr_id: ExpressionId,          // expression that is evaluated
     field_or_monomorph_idx: i32,    // index of field, or index of monomorph array in type table
     extra_data_idx: i32,            // index of extra data needed for inference
 }

 impl Default for InferenceExpression {
     fn default() -> Self {
         Self{
             expr_type: InferenceType::default(),
             expr_id: ExpressionId::new_invalid(),
             field_or_monomorph_idx: -1,
             extra_data_idx: -1,
         }
     }
 }

 /// This particular visitor will recurse depth-first into the AST and ensures
 /// that all expressions have the appropriate types.
 pub(crate) struct PassTyping {
     // Current definition we're typechecking.
     reserved_idx: i32,
     definition_type: DefinitionType,
     poly_vars: Vec<ConcreteType>,
-    // Buffers for iteration over substatements and subexpressions
+    // Buffers for iteration over various types
     var_buffer: ScopedBuffer<VariableId>,
     expr_buffer: ScopedBuffer<ExpressionId>,
     stmt_buffer: ScopedBuffer<StatementId>,
+    bool_buffer: ScopedBuffer<bool>,
     // Mapping from parser type to inferred type. We attempt to continue to
     // specify these types until we're stuck or we've fully determined the type.
     var_types: HashMap<VariableId, VarData>,    // types of variables
     expr_types: Vec<InferenceExpression>,       // will be transferred to type table at end
     extra_data: Vec<ExtraData>,                 // data for polymorph inference
     // Keeping track of which expressions need to be reinferred because the
     // expressions they're linked to made progression on an associated type
     expr_queued: DequeSet<i32>,
 }

 // TODO: @Rename, this is used for a lot of type inferencing. It seems like
 //  there is a different underlying architecture waiting to surface.
 struct ExtraData {
     expr_id: ExpressionId, // the expression with which this data is associated
     definition_id: DefinitionId, // the definition, only used for user feedback
     /// Progression of polymorphic variables (if any)
     poly_vars: Vec<InferenceType>,
     /// Progression of types of call arguments or struct members
     embedded: Vec<InferenceType>,
     returned: InferenceType,
 }

 impl Default for ExtraData {
     fn default() -> Self {
         Self{
             expr_id: ExpressionId::new_invalid(),
             definition_id: DefinitionId::new_invalid(),
             poly_vars: Vec::new(),
             embedded: Vec::new(),
             returned: InferenceType::default(),
         }
     }
 }

 struct VarData {
     /// Type of the variable
     var_type: InferenceType,
     /// VariableExpressions that use the variable
     used_at: Vec<ExpressionId>,
     /// For channel statements we link to the other variable such that when one
     /// channel's interior type is resolved, we can also resolve the other one.
     linked_var: Option<VariableId>,
 }

 impl VarData {
     fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
         Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
     }
     fn new_local(var_type: InferenceType) -> Self {
         Self{ var_type, used_at: Vec::new(), linked_var: None }
     }
 }

 impl PassTyping {
     pub(crate) fn new() -> Self {
         PassTyping {
             reserved_idx: -1,
             definition_type: DefinitionType::Function(FunctionDefinitionId::new_invalid()),
             poly_vars: Vec::new(),
             var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
+            bool_buffer: ScopedBuffer::with_capacity(16),
             var_types: HashMap::new(),
             expr_types: Vec::new(),
             extra_data: Vec::new(),
             expr_queued: DequeSet::new(),
         }
     }

     pub(crate) fn queue_module_definitions(ctx: &mut Ctx, queue: &mut ResolveQueue) {
         debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::ValidatedAndLinked);
         let root_id = ctx.module().root_id;
         let root = &ctx.heap.protocol_descriptions[root_id];
         for definition_id in &root.definitions {
             let definition = &ctx.heap[*definition_id];

             let first_concrete_part = match definition {
                 Definition::Function(definition) => {
                     if definition.poly_vars.is_empty() {
                         Some(ConcreteTypePart::Function(*definition_id, 0))
                     } else {
                         None
                     }
                 }
                 Definition::Component(definition) => {
                     if definition.poly_vars.is_empty() {
                         Some(ConcreteTypePart::Component(*definition_id, 0))
                     } else {
                         None
                     }
                 },
                 Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => None,
             };

             if let Some(first_concrete_part) = first_concrete_part {
                 let concrete_type = ConcreteType{ parts: vec![first_concrete_part] };
                 let reserved_idx = ctx.types.reserve_procedure_monomorph_index(definition_id, concrete_type);
                 queue.push(ResolveQueueElement{
                     root_id,
                     definition_id: *definition_id,
                     reserved_monomorph_idx: reserved_idx,
                 })
             }
         }
     }

     pub(crate) fn handle_module_definition(
         &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
     ) -> VisitorResult {
         self.reset();
 
@@ -2477,172 +2479,174 @@ impl PassTyping {
                     "   - Ret type | sig: {}, expr: {}",
                     unsafe{&*signature_type}.display_name(&ctx.heap),
                     unsafe{&*expr_type}.display_name(&ctx.heap)
                 );
                 debug_log!("   - Ret poly progress | {:?}", poly_progress);

                 if progress_expr {
                     // TODO: @cleanup, borrowing rules
                     if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
                         let parent_idx = ctx.heap[parent_id].get_unique_id_in_definition();
                         self.expr_queued.push_back(parent_idx);
                     }
                 }

                 debug_log!(" * During (reinferring from progress polyvars):");

                 // For all embedded values of the union variant
                 for value_idx in 0..extra.embedded.len() {
                     let signature_type: *mut _ = &mut extra.embedded[value_idx];
                     let value_expr_id = data.values[value_idx];
                     let value_expr_idx = ctx.heap[value_expr_id].get_unique_id_in_definition();
                     let value_type: *mut _ = &mut self.expr_types[value_expr_idx as usize].expr_type;

                     let progress_arg = Self::apply_equal2_polyvar_constraint(
                         extra, &poly_progress, signature_type, value_type
                     );

                     debug_log!(
                         "   - Value {} type | sig: {}, value: {}", value_idx,
                         unsafe{&*signature_type}.display_name(&ctx.heap),
                         unsafe{&*value_type}.display_name(&ctx.heap)
                     );
                     if progress_arg {
                         self.expr_queued.push_back(value_expr_idx);
                     }
                 }

                 // And for the union type itself
                 let signature_type: *mut _ = &mut extra.returned;
                 let expr_type: *mut _ = &mut self.expr_types[expr_idx as usize].expr_type;

                 let progress_expr = Self::apply_equal2_polyvar_constraint(
                     extra, &poly_progress, signature_type, expr_type
                 );

                 progress_expr
             },
             Literal::Array(data) => {
-                // TODO: Continue here with performance improvements by reducing allocations
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
                 debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
                 debug_log!(" * Before:");
                 debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));

                 // All elements should have an equal type
-                let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
-                for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
-                    if *progress_arg {
-                        self.queue_expr(ctx, *arg_id);
+                let mut bool_buffer = self.bool_buffer.start_section();
+                self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements, &mut bool_buffer)?;
+                for (progress_arg, arg_id) in bool_buffer.iter_copied().zip(expr_elements.iter_copied()) {
+                    if progress_arg {
+                        self.queue_expr(ctx, arg_id);
                     }
                 }

                 // And the output should be an array of the element types
                 let mut progress_expr = self.apply_template_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
-                if !expr_elements.is_empty() {
+                if expr_elements.len() != 0 {
                     let first_arg_id = expr_elements[0];
                     let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
                         ctx, upcast_id, upcast_id, 1, first_arg_id, 0
                     )?;

                     progress_expr = progress_expr || inner_expr_progress;

                     // Note that if the array type progressed the type of the arguments,
                     // then we should enqueue this progression function again
                     // TODO: @fix Make apply_equal_n accept a start idx as well
                     if arg_progress { self.queue_expr(ctx, upcast_id); }
                 }

                 debug_log!(" * After:");
                 debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));

+                expr_elements.forget();
                 progress_expr
             },
             Literal::Tuple(data) => {
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
                 debug_log!("Tuple expr ({} elements): {}", expr_elements.len(), upcast_id.index);
                 debug_log!(" * Before:");
                 debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));

                 // Initial tuple constraint
                 let num_members = expr_elements.len();
                 let mut initial_type = Vec::with_capacity(num_members + 1); // TODO: @performance
                 initial_type.push(InferenceTypePart::Tuple(num_members as u32));
                 for _ in 0..num_members {
                     initial_type.push(InferenceTypePart::Unknown);
                 }
                 let mut progress_expr = self.apply_template_constraint(ctx, upcast_id, &initial_type)?;

                 // The elements of the tuple can have any type, but they must
                 // end up as arguments to the output tuple type.
                 debug_log!(" * During (checking expressions constituting tuple):");
-                for (member_expr_index, member_expr_id) in expr_elements.iter().enumerate() {
+                for (member_expr_index, member_expr_id) in expr_elements.iter_copied().enumerate() {
                     // For the current expression index, (re)compute the
                     // position in the tuple type where the types should match.
                     let mut start_index = 1; // first element is Tuple type, second is the first child
                     for _ in 0..member_expr_index {
                         let tuple_expr_index = ctx.heap[id].unique_id_in_definition;
                         let tuple_type = &self.expr_types[tuple_expr_index as usize].expr_type;
                         start_index = InferenceType::find_subtree_end_idx(&tuple_type.parts, start_index);
                         debug_assert_ne!(start_index, tuple_type.parts.len()); // would imply fewer tuple type children than member expressions
                     }

                     // Apply the constraint
                     let (member_progress_expr, member_progress) = self.apply_equal2_constraint(
-                        ctx, upcast_id, upcast_id, start_index, *member_expr_id, 0
+                        ctx, upcast_id, upcast_id, start_index, member_expr_id, 0
                     )?;
                     debug_log!("   - Member {} type | {}", member_expr_index, self.debug_get_display_name(ctx, *member_expr_id));
                     progress_expr = progress_expr || member_progress_expr;

                     if member_progress {
-                        self.queue_expr(ctx, *member_expr_id);
+                        self.queue_expr(ctx, member_expr_id);
                     }
                 }

+                expr_elements.forget();
                 progress_expr
             }
         };

         debug_log!(" * After:");
         debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));

         if progress_expr { self.queue_expr_parent(ctx, upcast_id); }

         Ok(())
     }

     fn progress_cast_expr(&mut self, ctx: &mut Ctx, id: CastExpressionId) -> Result<(), ParseError> {
         let upcast_id = id.upcast();
         let expr = &ctx.heap[id];
         let expr_idx = expr.unique_id_in_definition;

         debug_log!("Casting expr: {}", upcast_id.index);
         debug_log!(" * Before:");
         debug_log!("   - Expr type:    {}", self.debug_get_display_name(ctx, upcast_id));
         debug_log!("   - Subject type: {}", self.debug_get_display_name(ctx, expr.subject));

         // The cast expression might have its output type fixed by the
         // programmer, so apply that type to the output. Apart from that casting
         // acts like a blocker for two-way inference. So we'll just have to wait
         // until we know if the cast is valid.
         // TODO: Another thing that has to be updated the moment the type
         //  inferencer is fully index/job-based
         let infer_type = self.determine_inference_type_from_parser_type_elements(&expr.to_type.elements, true);
         let expr_progress = self.apply_template_constraint(ctx, upcast_id, &infer_type.parts)?;

         if expr_progress {
             self.queue_expr_parent(ctx, upcast_id);
         }

         // Check if the two types are compatible
         debug_log!(" * After:");
         debug_log!("   - Expr type [{}]: {}", expr_progress, self.debug_get_display_name(ctx, upcast_id));
         debug_log!("   - Note that the subject type can never be inferred");
         debug_log!(" * Decision:");

         let subject_idx = ctx.heap[expr.subject].get_unique_id_in_definition();
         let expr_type = &self.expr_types[expr_idx as usize].expr_type;
         let subject_type = &self.expr_types[subject_idx as usize].expr_type;
         if !expr_type.is_done || !subject_type.is_done {
             // Not yet done
             debug_log!("   - Casting is valid: unknown as the types are not yet complete");
             return Ok(())
 
@@ -3137,158 +3141,170 @@ impl PassTyping {
     /// types is made equal.
     fn apply_equal3_constraint(
         &mut self, ctx: &Ctx, expr_id: ExpressionId,
         arg1_id: ExpressionId, arg2_id: ExpressionId,
         start_idx: usize
     ) -> Result<(bool, bool, bool), ParseError> {
         // Safety: all pointers are unique
         //         containers may not be modified
         let expr_expr_idx = ctx.heap[expr_id].get_unique_id_in_definition(); // TODO: @Temp
         let arg1_expr_idx = ctx.heap[arg1_id].get_unique_id_in_definition();
         let arg2_expr_idx = ctx.heap[arg2_id].get_unique_id_in_definition();

         let expr_type: *mut _ = &mut self.expr_types[expr_expr_idx as usize].expr_type;
         let arg1_type: *mut _ = &mut self.expr_types[arg1_expr_idx as usize].expr_type;
         let arg2_type: *mut _ = &mut self.expr_types[arg2_expr_idx as usize].expr_type;

         let expr_res = unsafe{
             InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
         };
         if expr_res == DualInferenceResult::Incompatible {
             return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
         }

         let args_res = unsafe{
             InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx)
         };
         if args_res == DualInferenceResult::Incompatible {
             return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
         }

         // If all types are compatible, but the second call caused the arg1_type
         // to be expanded, then we must also assign this to expr_type.
         let mut progress_expr = expr_res.modified_lhs();
         let mut progress_arg1 = expr_res.modified_rhs();
         let progress_arg2 = args_res.modified_rhs();

         if args_res.modified_lhs() {
             unsafe {
                 let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
                 let subtree = &((*arg2_type).parts[start_idx..end_idx]);
                 (*expr_type).replace_subtree(start_idx, subtree);
             }
             progress_expr = true;
             progress_arg1 = true;
         }

         Ok((progress_expr, progress_arg1, progress_arg2))
     }
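The `args_res.modified_lhs()` branch is the subtle part: after `expr` has been unified with `arg1`, the later `arg1`/`arg2` unification may refine `arg1` further, and that refinement must be copied back into `expr` as well. A toy model, with `Option<i32>` standing in for partially inferred types (not the real `InferenceType`):

    fn unify(a: &mut Option<i32>, b: &mut Option<i32>) -> Result<(), ()> {
        match (*a, *b) {
            (Some(x), Some(y)) if x != y => Err(()),      // incompatible
            (Some(x), None) => { *b = Some(x); Ok(()) }   // propagate left to right
            (None, Some(y)) => { *a = Some(y); Ok(()) }   // propagate right to left
            _ => Ok(()),
        }
    }

    fn equal3(expr: &mut Option<i32>, arg1: &mut Option<i32>, arg2: &mut Option<i32>) -> Result<(), ()> {
        unify(expr, arg1)?;          // pass 1: expr <-> arg1
        let arg1_before = *arg1;
        unify(arg1, arg2)?;          // pass 2: arg1 <-> arg2
        if *arg1 != arg1_before {    // arg1 expanded after expr already unified with it,
            *expr = *arg1;           // so expr must receive the expansion as well
        }
        Ok(())
    }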
 

	
 
     // TODO: @optimize Since we only deal with a single type this might be done
     //  a lot more efficiently, methinks (disregarding the allocations here)
     /// Applies equal constraint to N consecutive expressions. The returned
     /// `progress` vec will contain which expressions were progressed and will
     /// have length N
     fn apply_equal_n_constraint(
-        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
-    ) -> Result<Vec<bool>, ParseError> {
+        &mut self, ctx: &Ctx, expr_id: ExpressionId,
+        args: &ScopedSection<ExpressionId>, progress: &mut ScopedSection<bool>
+    ) -> Result<(), ParseError> {
         // Early exit
+        debug_assert_eq!(progress.len(), 0);
         match args.len() {
-            0 => return Ok(vec!()),         // nothing to progress
-            1 => return Ok(vec![false]),    // only one type, so nothing to infer
-            _ => {}
+            0 => {
+                // nothing to progress
+                return Ok(())
+            },
+            1 => {
+                // only one type, so nothing to infer
+                progress.push(false);
+                return Ok(())
+            },
+            n => {
+                for _ in 0..n {
+                    progress.push(false);
+                }
+            }
         }

-        let mut progress = Vec::new(); // TODO: @Performance
-        progress.resize(args.len(), false);

         // Do pairwise inference, keep track of the last entry we made progress
         // on. Once done we need to update everything to the most-inferred type.
-        let mut arg_iter = args.iter();
-        let mut last_arg_id = *arg_iter.next().unwrap();
+        let mut arg_iter = args.iter_copied();
+        let mut last_arg_id = arg_iter.next().unwrap();
         let mut last_lhs_progressed = 0;
         let mut lhs_arg_idx = 0;

         while let Some(next_arg_id) = arg_iter.next() {
             let last_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition(); // TODO: @Temp
-            let next_expr_idx = ctx.heap[*next_arg_id].get_unique_id_in_definition();
+            let next_expr_idx = ctx.heap[next_arg_id].get_unique_id_in_definition();
             let last_type: *mut _ = &mut self.expr_types[last_expr_idx as usize].expr_type;
             let next_type: *mut _ = &mut self.expr_types[next_expr_idx as usize].expr_type;

             let res = unsafe {
                 InferenceType::infer_subtrees_for_both_types(last_type, 0, next_type, 0)
             };

             if res == DualInferenceResult::Incompatible {
-                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
+                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, next_arg_id));
             }

             if res.modified_lhs() {
                 // We re-inferred something on the left hand side, so everything
                 // up until now should be re-inferred.
                 progress[lhs_arg_idx] = true;
                 last_lhs_progressed = lhs_arg_idx;
             }
             progress[lhs_arg_idx + 1] = res.modified_rhs();

-            last_arg_id = *next_arg_id;
+            last_arg_id = next_arg_id;
             lhs_arg_idx += 1;
         }

         // Re-infer everything. Note that we do not need to re-infer the type
         // exactly at `last_lhs_progressed`, but only everything up to it.
-        let last_arg_expr_idx = ctx.heap[*args.last().unwrap()].get_unique_id_in_definition();
+        let last_arg_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition();
         let last_type: *mut _ = &mut self.expr_types[last_arg_expr_idx as usize].expr_type;
         for arg_idx in 0..last_lhs_progressed {
             let other_arg_expr_idx = ctx.heap[args[arg_idx]].get_unique_id_in_definition();
             let arg_type: *mut _ = &mut self.expr_types[other_arg_expr_idx as usize].expr_type;
             unsafe{
                 (*arg_type).replace_subtree(0, &(*last_type).parts);
             }
             progress[arg_idx] = true;
         }

-        Ok(progress)
+        return Ok(());
     }
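The shape of the algorithm above, again in a toy model with `Option<i32>` for partially known types: unify neighbours pairwise, remember the last position whose left-hand side was refined, then back-propagate the final (most refined) type to all earlier entries:

    fn apply_equal_n(types: &mut [Option<i32>], progress: &mut [bool]) -> Result<(), String> {
        let n = types.len();
        if n < 2 { return Ok(()); }

        let mut last_lhs_progressed = 0;
        for i in 0..n - 1 {
            match (types[i], types[i + 1]) {
                (Some(a), Some(b)) if a != b => return Err(format!("incompatible: {} vs {}", a, b)),
                (Some(a), None) => { types[i + 1] = Some(a); progress[i + 1] = true; }
                (None, Some(b)) => { types[i] = Some(b); progress[i] = true; last_lhs_progressed = i; }
                _ => {}
            }
        }

        // The forward sweep leaves entries before `last_lhs_progressed` with a
        // stale type; copy the fully unified final type back into them.
        let final_type = types[n - 1];
        for i in 0..last_lhs_progressed {
            types[i] = final_type;
            progress[i] = true;
        }
        Ok(())
    }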
 

	
 
     /// Determines the `InferenceType` for the expression based on the
     /// expression parent. Note that if the parent is another expression, we do
     /// not take special action, instead we let parent expressions fix the type
     /// of subexpressions before they have a chance to call this function.
     fn insert_initial_expr_inference_type(
         &mut self, ctx: &mut Ctx, expr_id: ExpressionId
     ) -> Result<(), ParseError> {
         use ExpressionParent as EP;
         use InferenceTypePart as ITP;

         let expr = &ctx.heap[expr_id];
         let inference_type = match expr.parent() {
             EP::None =>
                 // Should have been set by linker
                 unreachable!(),
             EP::ExpressionStmt(_) =>
                 // Determined during type inference
                 InferenceType::new(false, false, vec![ITP::Unknown]),
             EP::Expression(parent_id, idx_in_parent) => {
                 // If we are the test expression of a conditional expression,
                 // then we must resolve to a boolean
                 let is_conditional = if let Expression::Conditional(_) = &ctx.heap[*parent_id] {
                     true
                 } else {
                     false
                 };

                 if is_conditional && *idx_in_parent == 0 {
                     InferenceType::new(false, true, vec![ITP::Bool])
                 } else {
                     InferenceType::new(false, false, vec![ITP::Unknown])
                 }
             },
             EP::If(_) | EP::While(_) =>
                 // Must be a boolean
                 InferenceType::new(false, true, vec![ITP::Bool]),
             EP::Return(_) =>
                 // Must match the return type of the function
                 if let DefinitionType::Function(func_id) = self.definition_type {
                     debug_assert_eq!(ctx.heap[func_id].return_types.len(), 1);
                     let returned = &ctx.heap[func_id].return_types[0];
                     self.determine_inference_type_from_parser_type_elements(&returned.elements, true)
                 } else {
                     // Cannot happen: definition always set upon body traversal
                     // and "return" calls in components are illegal.
                     unreachable!();
src/protocol/parser/pass_validation_linking.rs
@@ -127,98 +127,97 @@ impl PassValidationLinking {
             next_expr_index: 0,
             variable_buffer: ScopedBuffer::with_capacity(128),
             definition_buffer: ScopedBuffer::with_capacity(128),
             statement_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             expression_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
         }
     }

     fn reset_state(&mut self) {
         self.in_sync = SynchronousStatementId::new_invalid();
         self.in_while = WhileStatementId::new_invalid();
         self.in_test_expr = StatementId::new_invalid();
         self.in_binding_expr = BindingExpressionId::new_invalid();
         self.in_binding_expr_lhs = false;
         self.cur_scope = Scope::Definition(DefinitionId::new_invalid());
         self.def_type = DefinitionType::Function(FunctionDefinitionId::new_invalid());
         self.prev_stmt = StatementId::new_invalid();
         self.expr_parent = ExpressionParent::None;
         self.must_be_assignable = None;
         self.relative_pos_in_block = 0;
         self.next_expr_index = 0
     }
 }

 macro_rules! assign_then_erase_next_stmt {
     ($self:ident, $ctx:ident, $stmt_id:expr) => {
         if !$self.prev_stmt.is_invalid() {
             $ctx.heap[$self.prev_stmt].link_next($stmt_id);
             $self.prev_stmt = StatementId::new_invalid();
         }
     }
 }

 macro_rules! assign_and_replace_next_stmt {
     ($self:ident, $ctx:ident, $stmt_id:expr) => {
         if !$self.prev_stmt.is_invalid() {
             $ctx.heap[$self.prev_stmt].link_next($stmt_id);
         }
         $self.prev_stmt = $stmt_id;
     }
 }
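For review context, the difference between the two macros: both link the previous statement to the current one, but the first leaves no predecessor behind while the second makes the current statement the new predecessor. A toy equivalent with invented types:

    #[derive(Default)]
    struct Stmt { next: Option<usize> }

    fn link_next(stmts: &mut [Stmt], prev: &mut Option<usize>, cur: usize, erase: bool) {
        if let Some(p) = *prev {
            stmts[p].next = Some(cur);                  // prev_stmt.link_next(stmt_id)
        }
        *prev = if erase { None } else { Some(cur) };   // erase vs. replace
    }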
 

	
 
 impl Visitor for PassValidationLinking {
     fn visit_module(&mut self, ctx: &mut Ctx) -> VisitorResult {
         debug_assert_eq!(ctx.module().phase, ModuleCompilationPhase::TypesAddedToTable);

         let root = &ctx.heap[ctx.module().root_id];
         let section = self.definition_buffer.start_section_initialized(&root.definitions);
-        for definition_idx in 0..section.len() {
-            let definition_id = section[definition_idx];
+        for definition_id in section.iter_copied() {
             self.visit_definition(ctx, definition_id)?;
         }
         section.forget();

         ctx.module_mut().phase = ModuleCompilationPhase::ValidatedAndLinked;
         Ok(())
     }
     //--------------------------------------------------------------------------
     // Definition visitors
     //--------------------------------------------------------------------------

     fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentDefinitionId) -> VisitorResult {
         self.reset_state();

         self.def_type = match &ctx.heap[id].variant {
             ComponentVariant::Primitive => DefinitionType::Primitive(id),
             ComponentVariant::Composite => DefinitionType::Composite(id),
         };
         self.cur_scope = Scope::Definition(id.upcast());
         self.expr_parent = ExpressionParent::None;

         // Visit parameters and assign a unique scope ID
         let definition = &ctx.heap[id];
         let body_id = definition.body;
         let section = self.variable_buffer.start_section_initialized(&definition.parameters);
         for variable_idx in 0..section.len() {
             let variable_id = section[variable_idx];
             let variable = &mut ctx.heap[variable_id];
             variable.unique_id_in_scope = variable_idx as i32;
         }
         section.forget();

         // Visit statements in component body
         self.visit_block_stmt(ctx, body_id)?;

         // Assign total number of expressions and assign an in-block unique ID
         // to each of the locals in the procedure.
         ctx.heap[id].num_expressions_in_body = self.next_expr_index;
         self.visit_definition_and_assign_local_ids(ctx, id.upcast());

         Ok(())
     }

     fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionDefinitionId) -> VisitorResult {
         self.reset_state();

         // Set internal statement indices
         self.def_type = DefinitionType::Function(id);
 
@@ -1366,98 +1365,97 @@ impl PassValidationLinking {
     fn visit_statement_for_locals_labels_and_in_sync(&mut self, ctx: &mut Ctx, relative_pos: u32, id: StatementId) -> VisitorResult {
         let statement = &mut ctx.heap[id];
         match statement {
             Statement::Local(stmt) => {
                 match stmt {
                     LocalStatement::Memory(local) => {
                         let variable_id = local.variable;
                         self.checked_add_local(ctx, relative_pos, variable_id)?;
                     },
                     LocalStatement::Channel(local) => {
                         let from_id = local.from;
                         let to_id = local.to;
                         self.checked_add_local(ctx, relative_pos, from_id)?;
                         self.checked_add_local(ctx, relative_pos, to_id)?;
                     }
                 }
             }
             Statement::Labeled(stmt) => {
                 let stmt_id = stmt.this;
                 let body_id = stmt.body;
                 self.checked_add_label(ctx, relative_pos, self.in_sync, stmt_id)?;
                 self.visit_statement_for_locals_labels_and_in_sync(ctx, relative_pos, body_id)?;
             },
             Statement::While(stmt) => {
                 stmt.in_sync = self.in_sync;
             },
             _ => {},
         }

         return Ok(())
     }

     fn visit_definition_and_assign_local_ids(&mut self, ctx: &mut Ctx, definition_id: DefinitionId) {
         let mut var_counter = 0;

         // Set IDs on parameters
         let (param_section, body_id) = match &ctx.heap[definition_id] {
             Definition::Function(func_def) => (
                 self.variable_buffer.start_section_initialized(&func_def.parameters),
                 func_def.body
             ),
             Definition::Component(comp_def) => (
                 self.variable_buffer.start_section_initialized(&comp_def.parameters),
                 comp_def.body
             ),
             _ => unreachable!(),
         };

-        for idx in 0..param_section.len() {
-            let var_id = param_section[idx];
+        for var_id in param_section.iter_copied() {
             let var = &mut ctx.heap[var_id];
             var.unique_id_in_scope = var_counter;
             var_counter += 1;
         }

         param_section.forget();

         // Recurse into body
         self.visit_block_and_assign_local_ids(ctx, body_id, var_counter);
     }

     fn visit_block_and_assign_local_ids(&mut self, ctx: &mut Ctx, block_id: BlockStatementId, mut var_counter: i32) {
         let block_stmt = &mut ctx.heap[block_id];
         block_stmt.first_unique_id_in_scope = var_counter;

         let var_section = self.variable_buffer.start_section_initialized(&block_stmt.locals);
         let mut scope_section = self.statement_buffer.start_section();
         for child_scope in &block_stmt.scope_node.nested {
             debug_assert!(child_scope.is_block(), "found a child scope that is not a block statement");
             scope_section.push(child_scope.to_block().upcast());
         }

         let mut var_idx = 0;
         let mut scope_idx = 0;
         while var_idx < var_section.len() || scope_idx < scope_section.len() {
             let relative_var_pos = if var_idx < var_section.len() {
                 ctx.heap[var_section[var_idx]].relative_pos_in_block
             } else {
                 u32::MAX
             };

             let relative_scope_pos = if scope_idx < scope_section.len() {
                 ctx.heap[scope_section[scope_idx]].as_block().relative_pos_in_parent
             } else {
                 u32::MAX
             };

             debug_assert!(!(relative_var_pos == u32::MAX && relative_scope_pos == u32::MAX));

             // In certain cases the relative variable position is the same as
             // the scope position (insertion of binding variables). In that case
             // the variable should be treated first
             if relative_var_pos <= relative_scope_pos {
                 let var = &mut ctx.heap[var_section[var_idx]];
                 var.unique_id_in_scope = var_counter;
                 var_counter += 1;
                 var_idx += 1;
             } else {