diff --git a/src/collections/scoped_buffer.rs b/src/collections/scoped_buffer.rs
index 0d5865bd6d7014a443f35c90c53f082ddd838ef8..7f5194cf31bc6ef317490d82f6dc413fb07e2272 100644
--- a/src/collections/scoped_buffer.rs
+++ b/src/collections/scoped_buffer.rs
@@ -7,27 +7,28 @@
 ///
 /// It is unsafe because we're using pointers to circumvent borrowing rules in
 /// the name of code cleanliness. The correctness of use is checked in debug
-/// mode.
+/// mode at runtime.
 
 use std::iter::FromIterator;
 
-pub(crate) struct ScopedBuffer<T> {
-    pub inner: Vec<T>,
+macro_rules! hide {
+    ($v:block) => {
+        #[cfg(debug_assertions)] $v
+    };
+    ($v:expr) => {
+        #[cfg(debug_assertions)] $v
+    };
 }
 
-/// A section of the buffer. Keeps track of where we started the section. When
-/// done with the section one must call `into_vec` or `forget` to remove the
-/// section from the underlying buffer. This will also be done upon dropping the
-/// ScopedSection in case errors are being handled.
-pub(crate) struct ScopedSection<T> {
-    inner: *mut Vec<T>,
-    start_size: u32,
-    #[cfg(debug_assertions)] cur_size: u32,
+pub(crate) struct ScopedBuffer<T> {
+    pub inner: Vec<T>,
 }
 
 impl<T> ScopedBuffer<T> {
     pub(crate) fn with_capacity(capacity: usize) -> Self {
-        Self { inner: Vec::with_capacity(capacity) }
+        Self {
+            inner: Vec::with_capacity(capacity),
+        }
     }
 
     pub(crate) fn start_section(&mut self) -> ScopedSection<T> {
@@ -35,7 +36,7 @@ impl<T> ScopedBuffer<T> {
         ScopedSection {
             inner: &mut self.inner,
             start_size,
-            #[cfg(debug_assertions)] cur_size: start_size
+            #[cfg(debug_assertions)] cur_size: start_size,
         }
     }
 }
@@ -61,18 +62,35 @@ impl<T> Drop for ScopedBuffer<T> {
     }
 }
 
+/// A section of the buffer. Keeps track of where we started the section. When
+/// done with the section one must call `into_vec` or `forget` to remove the
+/// section from the underlying buffer. This also happens when the
+/// ScopedSection is dropped (e.g. while errors are being handled).
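+///
+/// A minimal usage sketch (assuming only the API shown in this file):
+///
+/// ```ignore
+/// let mut buffer = ScopedBuffer::with_capacity(32);
+/// let mut section = buffer.start_section();
+/// section.push(1);
+/// section.push(2);
+/// let values = section.into_vec(); // drains the section out of the buffer
+/// ```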
+pub(crate) struct ScopedSection<T> {
+    inner: *mut Vec<T>,
+    start_size: u32,
+    #[cfg(debug_assertions)] cur_size: u32,
+}
+
 impl<T> ScopedSection<T> {
     #[inline]
     pub(crate) fn push(&mut self, value: T) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to push onto section, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to push onto section, but size is larger than expected"
+        ));
         vec.push(value);
-        #[cfg(debug_assertions)] { self.cur_size += 1; }
+        hide!(self.cur_size += 1);
     }
 
+    #[inline]
     pub(crate) fn len(&self) -> usize {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize, "trying to get section length, but size is larger than expected");
+        hide!(debug_assert_eq!(
+            vec.len(), self.cur_size as usize,
+            "trying to get section length, but size is larger than expected"
+        ));
         return vec.len() - self.start_size as usize;
     }
 
@@ -80,13 +98,13 @@ impl<T> ScopedSection<T> {
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn forget(mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to forget section, but size is larger than expected"
             );
             self.cur_size = self.start_size;
-        }
+        });
         vec.truncate(self.start_size as usize);
     }
 
@@ -94,13 +112,13 @@ impl<T> ScopedSection<T> {
     #[allow(unused_mut)] // used in debug mode
     pub(crate) fn into_vec(mut self) -> Vec<T> {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] {
+        hide!({
             debug_assert_eq!(
                 vec.len(), self.cur_size as usize,
                 "trying to turn section into vec, but size is larger than expected"
             );
             self.cur_size = self.start_size;
-        }
+        });
         let section = Vec::from_iter(vec.drain(self.start_size as usize..));
         section
     }
@@ -116,12 +134,19 @@ impl<T> ScopedSection<T> {
     }
 }
 
-impl<T> std::ops::Index<usize> for ScopedSection<T> {
+impl<T> std::ops::Index<usize> for ScopedSection<T> {
     type Output = T;
-    fn index(&self, idx: usize) -> &Self::Output {
+    fn index(&self, index: usize) -> &Self::Output {
         let vec = unsafe{&*self.inner};
-        return &vec[self.start_size as usize + idx]
+        return &vec[self.start_size as usize + index]
+    }
+}
+
+impl<T> std::ops::IndexMut<usize> for ScopedSection<T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        let vec = unsafe{&mut *self.inner};
+        return &mut vec[self.start_size as usize + index]
     }
 }
 
@@ -129,11 +154,14 @@ impl<T> std::ops::Index<usize> for ScopedSection<T> {
 impl<T> Drop for ScopedSection<T> {
     fn drop(&mut self) {
         let vec = unsafe{&mut *self.inner};
-        #[cfg(debug_assertions)] debug_assert_eq!(vec.len(), self.cur_size as usize);
+        hide!(debug_assert_eq!(vec.len(), self.cur_size as usize));
         vec.truncate(self.start_size as usize);
     }
 }
 
+/// Small utility for iterating over a section of the buffer. The same
+/// conditions as for the buffer apply: each time we retrieve an element the
+/// buffer must have the same size as at the moment of creation.
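+///
+/// A short sketch, assuming `iter_copied` on `ScopedSection` (as used
+/// elsewhere in this patch) is the usual way to obtain one:
+///
+/// ```ignore
+/// let section = buffer.start_section_initialized(&[1, 2, 3]);
+/// for value in section.iter_copied() {
+///     // `value` is a copy of the element; the underlying buffer must not
+///     // grow or shrink while the iterator is alive
+/// }
+/// section.forget();
+/// ```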
 pub(crate) struct ScopedIter<T> {
     inner: *mut Vec<T>,
     cur_index: u32,
@@ -144,6 +172,7 @@ impl<T: Copy> Iterator for ScopedIter<T> {
     type Item = T;
 
     fn next(&mut self) -> Option<Self::Item> {
+        hide!(debug_assert_eq!(self.last_index as usize, unsafe { (*self.inner).len() }));
         if self.cur_index >= self.last_index {
             return None;
         }
diff --git a/src/protocol/parser/pass_typing.rs b/src/protocol/parser/pass_typing.rs
index 0391ec8fa792b4e7dcaba09fe5d36d753a6ec076..d9138cbde013caefe4579b686441aa87ecab909e 100644
--- a/src/protocol/parser/pass_typing.rs
+++ b/src/protocol/parser/pass_typing.rs
@@ -49,7 +49,7 @@ macro_rules! debug_log {
 
 use std::collections::{HashMap, HashSet};
 
-use crate::collections::{ScopedBuffer, DequeSet};
+use crate::collections::{ScopedBuffer, ScopedSection, DequeSet};
 use crate::protocol::ast::*;
 use crate::protocol::input_source::ParseError;
 use crate::protocol::parser::ModuleCompilationPhase;
@@ -857,10 +857,11 @@ pub(crate) struct PassTyping {
     reserved_idx: i32,
     definition_type: DefinitionType,
     poly_vars: Vec<ConcreteType>,
-    // Buffers for iteration over substatements and subexpressions
+    // Buffers for iteration over various types
    var_buffer: ScopedBuffer<VariableId>,
     expr_buffer: ScopedBuffer<ExpressionId>,
     stmt_buffer: ScopedBuffer<StatementId>,
+    bool_buffer: ScopedBuffer<bool>,
     // Mapping from parser type to inferred type. We attempt to continue to
     // specify these types until we're stuck or we've fully determined the type.
     var_types: HashMap<VariableId, VarData>, // types of variables
@@ -923,6 +924,7 @@ impl PassTyping {
             var_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             expr_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
             stmt_buffer: ScopedBuffer::with_capacity(BUFFER_INIT_CAPACITY),
+            bool_buffer: ScopedBuffer::with_capacity(16),
             var_types: HashMap::new(),
             expr_types: Vec::new(),
             extra_data: Vec::new(),
@@ -2522,23 +2524,23 @@ impl PassTyping {
                 progress_expr
             },
             Literal::Array(data) => {
-                // TODO: Continue here with performance improvements by reducing allocations
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
                 debug_log!("Array expr ({} elements): {}", expr_elements.len(), upcast_id.index);
                 debug_log!(" * Before:");
                 debug_log!("   - Expr type: {}", self.debug_get_display_name(ctx, upcast_id));
 
                 // All elements should have an equal type
-                let progress = self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements)?;
-                for (progress_arg, arg_id) in progress.iter().zip(expr_elements.iter()) {
-                    if *progress_arg {
-                        self.queue_expr(ctx, *arg_id);
+                let mut bool_buffer = self.bool_buffer.start_section();
+                self.apply_equal_n_constraint(ctx, upcast_id, &expr_elements, &mut bool_buffer)?;
+                for (progress_arg, arg_id) in bool_buffer.iter_copied().zip(expr_elements.iter_copied()) {
+                    if progress_arg {
+                        self.queue_expr(ctx, arg_id);
                     }
                 }
 
                 // And the output should be an array of the element types
                 let mut progress_expr = self.apply_template_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
-                if !expr_elements.is_empty() {
+                if expr_elements.len() != 0 {
                     let first_arg_id = expr_elements[0];
                     let (inner_expr_progress, arg_progress) = self.apply_equal2_constraint(
                         ctx, upcast_id, upcast_id, 1, first_arg_id, 0
@@ -2555,10 +2557,11 @@ impl PassTyping {
                 debug_log!(" * After:");
                 debug_log!("   - Expr type [{}]: {}", progress_expr, self.debug_get_display_name(ctx, upcast_id));
 
+                expr_elements.forget();
                 progress_expr
             },
             Literal::Tuple(data) => {
-                let expr_elements = data.clone(); // TODO: @performance
+                let expr_elements = self.expr_buffer.start_section_initialized(data.as_slice());
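+                // Same pattern as the array case above: `expr_elements`
+                // borrows `self.expr_buffer` through a raw pointer, so it is
+                // released via `forget()` at the end of this arm, truncating
+                // the buffer back to its pre-section size.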
debug_log!("Tuple expr ({} elements): {}", expr_elements.len(), upcast_id.index); debug_log!(" * Before:"); debug_log!(" - Expr type: {}", self.debug_get_display_name(ctx, upcast_id)); @@ -2575,7 +2578,7 @@ impl PassTyping { // The elements of the tuple can have any type, but they must // end up as arguments to the output tuple type. debug_log!(" * During (checking expressions constituting tuple):"); - for (member_expr_index, member_expr_id) in expr_elements.iter().enumerate() { + for (member_expr_index, member_expr_id) in expr_elements.iter_copied().enumerate() { // For the current expression index, (re)compute the // position in the tuple type where the types should match. let mut start_index = 1; // first element is Tuple type, second is the first child @@ -2588,16 +2591,17 @@ impl PassTyping { // Apply the constraint let (member_progress_expr, member_progress) = self.apply_equal2_constraint( - ctx, upcast_id, upcast_id, start_index, *member_expr_id, 0 + ctx, upcast_id, upcast_id, start_index, member_expr_id, 0 )?; debug_log!(" - Member {} type | {}", member_expr_index, self.debug_get_display_name(ctx, *member_expr_id)); progress_expr = progress_expr || member_progress_expr; if member_progress { - self.queue_expr(ctx, *member_expr_id); + self.queue_expr(ctx, member_expr_id); } } + expr_elements.forget(); progress_expr } }; @@ -3182,31 +3186,43 @@ impl PassTyping { Ok((progress_expr, progress_arg1, progress_arg2)) } - // TODO: @optimize Since we only deal with a single type this might be done - // a lot more efficiently, methinks (disregarding the allocations here) + /// Applies equal constraint to N consecutive expressions. The returned + /// `progress` vec will contain which expressions were progressed and will + /// have length N + // If you ever fn apply_equal_n_constraint( - &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId], - ) -> Result, ParseError> { + &mut self, ctx: &Ctx, expr_id: ExpressionId, + args: &ScopedSection, progress: &mut ScopedSection + ) -> Result<(), ParseError> { // Early exit + debug_assert_eq!(progress.len(), 0); match args.len() { - 0 => return Ok(vec!()), // nothing to progress - 1 => return Ok(vec![false]), // only one type, so nothing to infer - _ => {} + 0 => { + // nothing to progress + return Ok(()) + }, + 1 => { + // only one type, so nothing to infer + progress.push(false); + return Ok(()) + }, + n => { + for _ in 0..n { + progress.push(false); + } + } } - let mut progress = Vec::new(); // TODO: @Performance - progress.resize(args.len(), false); - // Do pairwise inference, keep track of the last entry we made progress // on. Once done we need to update everything to the most-inferred type. 
-        let mut arg_iter = args.iter();
-        let mut last_arg_id = *arg_iter.next().unwrap();
+        let mut arg_iter = args.iter_copied();
+        let mut last_arg_id = arg_iter.next().unwrap();
         let mut last_lhs_progressed = 0;
         let mut lhs_arg_idx = 0;
 
         while let Some(next_arg_id) = arg_iter.next() {
             let last_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition(); // TODO: @Temp
-            let next_expr_idx = ctx.heap[*next_arg_id].get_unique_id_in_definition();
+            let next_expr_idx = ctx.heap[next_arg_id].get_unique_id_in_definition();
             let last_type: *mut _ = &mut self.expr_types[last_expr_idx as usize].expr_type;
             let next_type: *mut _ = &mut self.expr_types[next_expr_idx as usize].expr_type;
@@ -3215,7 +3231,7 @@ impl PassTyping {
             };
 
             if res == DualInferenceResult::Incompatible {
-                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
+                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, next_arg_id));
             }
 
             if res.modified_lhs() {
@@ -3226,13 +3242,13 @@ impl PassTyping {
             }
             progress[lhs_arg_idx + 1] = res.modified_rhs();
 
-            last_arg_id = *next_arg_id;
+            last_arg_id = next_arg_id;
             lhs_arg_idx += 1;
         }
 
         // Re-infer everything. Note that we do not need to re-infer the type
         // exactly at `last_lhs_progressed`, but only everything up to it.
-        let last_arg_expr_idx = ctx.heap[*args.last().unwrap()].get_unique_id_in_definition();
+        let last_arg_expr_idx = ctx.heap[last_arg_id].get_unique_id_in_definition();
         let last_type: *mut _ = &mut self.expr_types[last_arg_expr_idx as usize].expr_type;
         for arg_idx in 0..last_lhs_progressed {
             let other_arg_expr_idx = ctx.heap[args[arg_idx]].get_unique_id_in_definition();
@@ -3243,7 +3259,7 @@ impl PassTyping {
             progress[arg_idx] = true;
         }
 
-        Ok(progress)
+        return Ok(());
     }
 
     /// Determines the `InferenceType` for the expression based on the
diff --git a/src/protocol/parser/pass_validation_linking.rs b/src/protocol/parser/pass_validation_linking.rs
index 3753d46b648257fef64d7a2ee40edc686c55b948..df9d4551039e33aced60cb296859e92fd23c9d79 100644
--- a/src/protocol/parser/pass_validation_linking.rs
+++ b/src/protocol/parser/pass_validation_linking.rs
@@ -172,8 +172,7 @@ impl Visitor for PassValidationLinking {
         let root = &ctx.heap[ctx.module().root_id];
         let section = self.definition_buffer.start_section_initialized(&root.definitions);
 
-        for definition_idx in 0..section.len() {
-            let definition_id = section[definition_idx];
+        for definition_id in section.iter_copied() {
             self.visit_definition(ctx, definition_id)?;
         }
         section.forget();
@@ -1411,8 +1410,7 @@ impl PassValidationLinking {
             _ => unreachable!(),
         };
 
-        for idx in 0..param_section.len() {
-            let var_id = param_section[idx];
+        for var_id in param_section.iter_copied() {
             let var = &mut ctx.heap[var_id];
             var.unique_id_in_scope = var_counter;
             var_counter += 1;