Changeset - 2df3ee19d578
MH <contact@maxhenger.nl> - 2021-12-16 11:34:22
Remove more hot-loop allocations
3 files changed with 101 insertions and 58 deletions:
src/collections/scoped_buffer.rs
@@ -7,27 +7,28 @@
 ///
 /// It is unsafe because we're using pointers to circumvent borrowing rules in
 /// the name of code cleanliness. The correctness of use is checked in debug
-/// mode.
+/// mode at runtime.
 
 use std::iter::FromIterator;
 
-pub(crate) struct ScopedBuffer<T: Sized> {
-    pub inner: Vec<T>,
+macro_rules! hide {
+    ($v:block) => {
+        #[cfg(debug_assertions)] $v
+    };
+    ($v:expr) => {
+        #[cfg(debug_assertions)] $v
+    };
 }
 
-/// A section of the buffer. Keeps track of where we started the section. When
-/// done with the section one must call `into_vec` or `forget` to remove the
-/// section from the underlying buffer. This will also be done upon dropping the
-/// ScopedSection in case errors are being handled.
-pub(crate) struct ScopedSection<T: Sized> {
-    inner: *mut Vec<T>,
-    start_size: u32,
-    #[cfg(debug_assertions)] cur_size: u32,
+pub(crate) struct ScopedBuffer<T: Sized> {
+    pub inner: Vec<T>,
 }
 
 impl<T: Sized> ScopedBuffer<T> {
     pub(crate) fn with_capacity(capacity: usize) -> Self {
-        Self { inner: Vec::with_capacity(capacity) }
+        Self {
+            inner: Vec::with_capacity(capacity),
+        }
     }
 
     pub(crate) fn start_section(&mut self) -> ScopedSection<T> {
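
The hide! macro added in this hunk simply gates whatever it wraps behind #[cfg(debug_assertions)], so the section bookkeeping only exists in debug builds and the release-mode hot path carries none of it. A small self-contained sketch of the effect (the macro body is copied from the hunk above; the surrounding main is invented for illustration):

    macro_rules! hide {
        ($v:block) => {
            #[cfg(debug_assertions)] $v
        };
        ($v:expr) => {
            #[cfg(debug_assertions)] $v
        };
    }

    fn main() {
        // The increments below are compiled out in release builds, hence the allow.
        #[allow(unused_mut)]
        let mut debug_counter = 0u32;
        hide!(debug_counter += 1);      // expression form: a debug-only statement
        hide!({ debug_counter += 1; }); // block form: a debug-only block
        // Prints 2 in a debug build and 0 with --release.
        println!("counter = {}", debug_counter);
    }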
 
@@ -35,7 +36,7 @@ impl<T: Sized> ScopedBuffer<T> {
         ScopedSection {
             inner: &mut self.inner,
             start_size,
-            #[cfg(debug_assertions)] cur_size: start_size
+            #[cfg(debug_assertions)] cur_size: start_size,
         }
     }
 }
@@ -61,18 +62,35 @@ impl<T: Sized> Drop for ScopedBuffer<T> {
     }
 }
 
+/// A section of the buffer. Keeps track of where we started the section. When
+/// done with the section one must call `into_vec` or `forget` to remove the
+/// section from the underlying buffer. This will also be done upon dropping the
+/// ScopedSection in case errors are being handled.
+pub(crate) struct ScopedSection<T: Sized> {
+    inner: *mut Vec<T>,
+    start_size: u32,
+    #[cfg(debug_assertions)] cur_size: u32,
+}
+
 impl<T: Sized> ScopedSection<T> {
     #[inline]
     pub(crate) fn push(&mut self, value: T) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to push onto section, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to push onto section, but size is larger than expected"
+        ));
         vec.push(value);
-        #[cfg(debug_assertions)] { self.cur_size += 1; }
+        hide!(self.cur_size += 1);
     }
 
     #[inline]
     pub(crate) fn len(&self) -> usize {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to get section length, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to get section length, but size is larger than expected"
+        ));
         return vec.len() - self.start_size as usize;
     }
 
@@ -80,13 +98,13 @@ impl<T: Sized> ScopedSection<T> {
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn forget(mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to forget section, but size is larger than expected"
             );
             self.cur_size = self.start_size;
-        }
+        });
         vec.truncate(self.start_size as usize);
     }
 
@@ -94,13 +112,13 @@ impl<T: Sized> ScopedSection<T> {
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn into_vec(mut self) -> Vec<T> {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)]  {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to turn section into vec, but size is larger than expected"
            );
             self.cur_size = self.start_size;
-        }
+        });
         let section = Vec::from_iter(vec.drain(self.start_size as usize..));
         section
     }
@@ -116,12 +134,19 @@ impl<T: Copy> ScopedSection<T> {
     }
 }
 
-impl<T: Sized> std::ops::Index<usize> for ScopedSection<T> {
+impl<T> std::ops::Index<usize> for ScopedSection<T> {
     type Output = T;
 
-    fn index(&self, idx: usize) -> &Self::Output {
+    fn index(&self, index: usize) -> &Self::Output {
         let vec = unsafe{&*self.inner};
-        return &vec[self.start_size as usize + idx]
+        return &vec[self.start_size as usize + index]
     }
 }
 
+impl<T> std::ops::IndexMut<usize> for ScopedSection<T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        let vec = unsafe{&mut *self.inner};
+        return &mut vec[self.start_size as usize + index]
+    }
+}
+
@@ -129,11 +154,14 @@ impl<T: Sized> std::ops::Index<usize> for ScopedSection<T> {
 impl<T: Sized> Drop for ScopedSection<T> {
     fn drop(&mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize);
+        hide!(debug_assert_eq!(vec.len(), self.cur_size as usize));
         vec.truncate(self.start_size as usize);
     }
 }
 
+/// Small utility for iterating over a section of the buffer. Same conditions as
+/// the buffer apply: each time we retrieve an element the buffer must have the
+/// same size as the moment of creation.
 pub(crate) struct ScopedIter<T: Copy> {
     inner: *mut Vec<T>,
     cur_index: u32,
@@ -144,6 +172,7 @@ impl<T: Copy> Iterator for ScopedIter<T> {
     type Item = T;
 
     fn next(&mut self) -> Option<Self::Item> {
+        hide!(debug_assert_eq!(self.last_index as usize, unsafe { (*self.inner).len() }));
        if self.cur_index >= self.last_index {
             return None;
         }
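
For context on why these sections take allocations out of hot loops: a ScopedBuffer is one long-lived Vec whose tail is handed out as short-lived sections, and finishing a section truncates the Vec back to where it started, so the capacity is reused by the next caller. Below is a minimal, self-contained sketch of that idea in safe Rust; it is not the crate's implementation (which stores a raw *mut Vec<T> and the debug-only size checks shown above), and the start_section_initialized/iter_copied signatures are assumed from their call sites in the other two files:

    // One backing Vec, reused across calls; a section remembers where it started.
    struct MiniBuffer<T> {
        inner: Vec<T>,
    }

    struct MiniSection<'a, T> {
        inner: &'a mut Vec<T>,
        start: usize,
    }

    impl<T: Copy> MiniBuffer<T> {
        fn with_capacity(capacity: usize) -> Self {
            Self { inner: Vec::with_capacity(capacity) }
        }

        // Open an empty section at the current end of the backing storage.
        fn start_section(&mut self) -> MiniSection<'_, T> {
            let start = self.inner.len();
            MiniSection { inner: &mut self.inner, start }
        }

        // Open a section pre-filled with a copy of `data`, the role that
        // start_section_initialized plays in the pass_typing.rs hunks below.
        fn start_section_initialized(&mut self, data: &[T]) -> MiniSection<'_, T> {
            let start = self.inner.len();
            self.inner.extend_from_slice(data);
            MiniSection { inner: &mut self.inner, start }
        }
    }

    impl<'a, T: Copy> MiniSection<'a, T> {
        fn push(&mut self, value: T) { self.inner.push(value); }
        fn len(&self) -> usize { self.inner.len() - self.start }
        // Hand out elements by value, like iter_copied in this changeset.
        fn iter_copied(&self) -> impl Iterator<Item = T> + '_ {
            self.inner[self.start..].iter().copied()
        }
        // Done with the section: shrink the Vec back, keeping its capacity.
        fn forget(self) { self.inner.truncate(self.start); }
    }

    fn main() {
        let mut expr_buffer = MiniBuffer::with_capacity(64);
        let element_ids = vec![10u32, 11, 12]; // stand-in for ExpressionId values
        for round in 0..3 {
            // A section pre-filled from a slice, as the Literal::Array arm now does.
            let section = expr_buffer.start_section_initialized(&element_ids);
            assert_eq!(section.len(), 3);
            for id in section.iter_copied() {
                let _ = id; // visit the element expression
            }
            section.forget();

            // An empty section filled incrementally with push().
            let mut extra = expr_buffer.start_section(); // reuses the same capacity
            extra.push(round);
            extra.forget();
        }
    }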
src/protocol/parser/pass_typing.rs
 
@@ -49,7 +49,7 @@ macro_rules! debug_log {
 
 use std::collections::{HashMap, HashSet};
 
-use crate::collections::{ScopedBuffer, DequeSet};
+use crate::collections::{ScopedBuffer, ScopedSection, DequeSet};
 use crate::protocol::ast::*;
 use crate::protocol::input_source::ParseError;
 use crate::protocol::parser::ModuleCompilationPhase;
@@ -857,10 +857,11 @@ pub(crate) struct PassTyping {
     reserved_idx: i32,
     definition_type: DefinitionType,
     poly_vars: Vec<ConcreteType>,
-    // Buffers for iteration over substatements and subexpressions
+    // Buffers for iteration over various types
     var_buffer: ScopedBuffer<VariableId>,
     expr_buffer: ScopedBuffer<ExpressionId>,
     stmt_buffer: ScopedBuffer<StatementId>,
+    bool_buffer: ScopedBuffer<bool>,
     // Mapping from parser type to inferred type. We attempt to continue to
     // specify these types until we're stuck or we've fully determined the type.
     var_types: HashMap<VariableId, VarData>,            // types of variables
@@ -923,6 +924,7 @@ impl PassTyping {
             var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
+            bool_buffer: ScopedBuffer::with_capacity(16),
             var_types: HashMap::new(),
             expr_types: Vec::new(),
             extra_data: Vec::new(),
@@ -2522,23 +2524,23 @@ impl PassTyping {
                 progress_expr
             },
             Literal::Array(data) => {
-                // TODO: Continue here with performance improvements by reducing allocations
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
                 debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
                 debug_log!(" * Before:");
                 debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 
                 // All elements should have an equal type
-                let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
-                for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
-                    if *progress_arg {
-                        self.queue_expr(ctx, *arg_id);
+                let mut bool_buffer = self.bool_buffer.start_section();
+                self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements, &mut bool_buffer)?;
+                for (progress_arg, arg_id) in bool_buffer.iter_copied().zip(expr_elements.iter_copied()) {
+                    if progress_arg {
+                        self.queue_expr(ctx, arg_id);
                     }
                 }
 
                 // And the output should be an array of the element types
                 let mut progress_expr = self.apply_template_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
-                if !expr_elements.is_empty() {
+                if expr_elements.len() != 0 {
                     let first_arg_id = expr_elements[0];
                     let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
                         ctx, upcast_id, upcast_id, 1, first_arg_id, 0
@@ -2555,10 +2557,11 @@ impl PassTyping {
                 debug_log!(" * After:");
                 debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 
+                expr_elements.forget();
                 progress_expr
             },
             Literal::Tuple(data) => {
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
                 debug_log!("Tuple expr ({} elements): {}", expr_elements.len(), upcast_id.index);
                 debug_log!(" * Before:");
                 debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
@@ -2575,7 +2578,7 @@ impl PassTyping {
                 // The elements of the tuple can have any type, but they must
                 // end up as arguments to the output tuple type.
                 debug_log!(" * During (checking expressions constituting tuple):");
-                for (member_expr_index, member_expr_id) in expr_elements.iter().enumerate() {
+                for (member_expr_index, member_expr_id) in expr_elements.iter_copied().enumerate() {
                     // For the current expression index, (re)compute the
                     // position in the tuple type where the types should match.
                     let mut start_index = 1; // first element is Tuple type, second is the first child
@@ -2588,16 +2591,17 @@ impl PassTyping {
 
                     // Apply the constraint
                     let (member_progress_expr, member_progress) = self.apply_equal2_constraint(
-                        ctx, upcast_id, upcast_id, start_index, *member_expr_id, 0
+                        ctx, upcast_id, upcast_id, start_index, member_expr_id, 0
                     )?;
                     debug_log!("   - Member {} type | {}", member_expr_index, self.debug_get_display_name(ctx, *member_expr_id));
                     progress_expr = progress_expr || member_progress_expr;
 
                     if member_progress {
-                        self.queue_expr(ctx, *member_expr_id);
+                        self.queue_expr(ctx, member_expr_id);
                     }
                 }
 
+                expr_elements.forget();
                 progress_expr
             }
         };
@@ -3182,31 +3186,43 @@ impl PassTyping {
         Ok((progress_expr, progress_arg1, progress_arg2))
     }
 
-    // TODO: @optimize Since we only deal with a single type this might be done
-    //  a lot more efficiently, methinks (disregarding the allocations here)
+    /// Applies equal constraint to N consecutive expressions. The returned
+    /// `progress` vec will contain which expressions were progressed and will
+    /// have length N
+    // If you ever
     fn apply_equal_n_constraint(
-        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
-    ) -> Result<Vec<bool>, ParseError> {
+        &mut self, ctx: &Ctx, expr_id: ExpressionId,
+        args: &ScopedSection<ExpressionId>, progress: &mut ScopedSection<bool>
+    ) -> Result<(), ParseError> {
         // Early exit
+        debug_assert_eq!(progress.len(), 0);
         match args.len() {
-            0 => return Ok(vec!()),         // nothing to progress
-            1 => return Ok(vec![false]),    // only one type, so nothing to infer
-            _ => {}
+            0 => {
+                // nothing to progress
+                return Ok(())
+            },
+            1 => {
+                // only one type, so nothing to infer
+                progress.push(false);
+                return Ok(())
+            },
+            n => {
+                for _ in 0..n {
+                    progress.push(false);
+                }
+            }
         }
 
-        let mut progress = Vec::new(); // TODO: @Performance
-        progress.resize(args.len(), false);
-
         // Do pairwise inference, keep track of the last entry we made progress
         // on. Once done we need to update everything to the most-inferred type.
-        let mut arg_iter = args.iter();
-        let mut last_arg_id = *arg_iter.next().unwrap();
+        let mut arg_iter = args.iter_copied();
+        let mut last_arg_id = arg_iter.next().unwrap();
         let mut last_lhs_progressed = 0;
         let mut lhs_arg_idx = 0;
 
         while let Some(next_arg_id) = arg_iter.next() {
             let last_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition(); // TODO: @Temp
-            let next_expr_idx = ctx.heap[*next_arg_id].get_unique_id_in_definition();
+            let next_expr_idx = ctx.heap[next_arg_id].get_unique_id_in_definition();
             let last_type: *mut _ = &mut self.expr_types[last_expr_idx as usize].expr_type;
             let next_type: *mut _ = &mut self.expr_types[next_expr_idx as usize].expr_type;
 
@@ -3215,7 +3231,7 @@ impl PassTyping {
             };
 
             if res == DualInferenceResult::Incompatible {
-                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
+                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, next_arg_id));
             }
 
             if res.modified_lhs() {
@@ -3226,13 +3242,13 @@ impl PassTyping {
             }
             progress[lhs_arg_idx + 1] = res.modified_rhs();
 
-            last_arg_id = *next_arg_id;
+            last_arg_id = next_arg_id;
             lhs_arg_idx += 1;
         }
 
         // Re-infer everything. Note that we do not need to re-infer the type
         // exactly at `last_lhs_progressed`, but only everything up to it.
-        let last_arg_expr_idx = ctx.heap[*args.last().unwrap()].get_unique_id_in_definition();
+        let last_arg_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition();
         let last_type: *mut _ = &mut self.expr_types[last_arg_expr_idx as usize].expr_type;
         for arg_idx in 0..last_lhs_progressed {
             let other_arg_expr_idx = ctx.heap[args[arg_idx]].get_unique_id_in_definition();
@@ -3243,7 +3259,7 @@ impl PassTyping {
             progress[arg_idx] = true;
        }
 
-        Ok(progress)
+        return Ok(());
     }
 
     /// Determines the `InferenceType` for the expression based on the
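
The apply_equal_n_constraint rewrite above is a return-value-to-out-parameter conversion: instead of allocating and returning a fresh Vec<bool> on every call, the caller (the Literal::Array arm earlier in this file) starts a ScopedSection<bool> from the reusable self.bool_buffer and passes it in to be filled. A stripped-down illustration of the difference, using a plain Vec instead of the crate's ScopedSection and with invented names:

    // Before: allocates a new Vec<bool> on every call.
    fn progressed_before(n: usize) -> Vec<bool> {
        vec![false; n]
    }

    // After: the caller owns the storage and reuses it across calls.
    fn progressed_after(n: usize, progress: &mut Vec<bool>) {
        debug_assert!(progress.is_empty());
        progress.resize(n, false);
    }

    fn main() {
        let mut progress = Vec::with_capacity(8); // lives outside the hot loop
        for n in 1..=8 {
            let _fresh = progressed_before(n);  // one heap allocation per iteration
            progressed_after(n, &mut progress); // reuses the same allocation
            progress.clear();
        }
    }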
src/protocol/parser/pass_validation_linking.rs
 
@@ -172,8 +172,7 @@ impl Visitor for PassValidationLinking {
 
         let root = &ctx.heap[ctx.module().root_id];
         let section = self.definition_buffer.start_section_initialized(&root.definitions);
-        for definition_idx in 0..section.len() {
-            let definition_id = section[definition_idx];
+        for definition_id in section.iter_copied() {
             self.visit_definition(ctx, definition_id)?;
         }
         section.forget();
@@ -1411,8 +1410,7 @@ impl PassValidationLinking {
             _ => unreachable!(),
         } ;
 
-        for idx in 0..param_section.len() {
-            let var_id = param_section[idx];
+        for var_id in param_section.iter_copied() {
             let var = &mut ctx.heap[var_id];
             var.unique_id_in_scope = var_counter;
             var_counter += 1;
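
Both loops in this file now take their items by value through iter_copied, presumably backed by the ScopedIter type whose struct and next() are partially visible in the scoped_buffer.rs diff above (it walks cur_index up to last_index over a raw *mut Vec<T>, with a debug assertion that the buffer kept its size). A safe, simplified stand-in for how such a copying iterator behaves:

    struct MiniIter<'a, T: Copy> {
        inner: &'a [T],
        cur_index: usize,
        last_index: usize,
    }

    impl<'a, T: Copy> Iterator for MiniIter<'a, T> {
        type Item = T;
        fn next(&mut self) -> Option<T> {
            if self.cur_index >= self.last_index {
                return None;
            }
            let value = self.inner[self.cur_index]; // copied out by value, no reference kept
            self.cur_index += 1;
            Some(value)
        }
    }

    fn main() {
        let definition_ids = [3u32, 5, 8]; // stand-in for a section of definition IDs
        let iter = MiniIter { inner: &definition_ids, cur_index: 0, last_index: definition_ids.len() };
        for definition_id in iter {
            println!("visiting definition {}", definition_id);
        }
    }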