Changeset - 62d63e1cec92
MH <contact@maxhenger.nl> - 2022-03-04 19:18:58
Fix conditional compilation, remove some warnings
9 files changed with 22 insertions and 29 deletions:
src/protocol/eval/executor.rs
@@ -24,23 +24,22 @@ pub(crate) enum ExprInstruction {
     PushValToFront,
 }
 
 #[derive(Debug, Clone)]
 pub(crate) struct Frame {
     pub(crate) definition: ProcedureDefinitionId,
-    pub(crate) monomorph_type_id: TypeId,
     pub(crate) monomorph_index: usize,
     pub(crate) position: StatementId,
     pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
     pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
     pub(crate) max_stack_size: u32,
 }
 
 impl Frame {
     /// Creates a new execution frame. Does not modify the stack in any way.
-    pub fn new(heap: &Heap, definition_id: ProcedureDefinitionId, monomorph_type_id: TypeId, monomorph_index: u32) -> Self {
+    pub fn new(heap: &Heap, definition_id: ProcedureDefinitionId, _monomorph_type_id: TypeId, monomorph_index: u32) -> Self {
         let definition = &heap[definition_id];
         let outer_scope_id = definition.scope;
         let first_statement_id = definition.body;
 
         // Another not-so-pretty thing that has to be replaced somewhere in the
         // future...
@@ -59,13 +58,12 @@ impl Frame {
 
         let mut max_stack_size = 0;
         determine_max_stack_size(heap, outer_scope_id, &mut max_stack_size);
 
         Frame{
             definition: definition_id,
-            monomorph_type_id,
             monomorph_index: monomorph_index as usize,
             position: first_statement_id.upcast(),
             expr_stack: VecDeque::with_capacity(128),
             expr_values: VecDeque::with_capacity(128),
             max_stack_size,
         }
src/protocol/parser/pass_rewriting.rs
@@ -264,13 +264,13 @@ impl Visitor for PassRewriting {
 
         call_id_section.forget();
         expr_id_section.forget();
 
         // Now we transform each of the select block case's guard and code into
         // a chained if-else statement.
-        let mut relative_pos = transformed_stmts.len() as i32;
+        let relative_pos = transformed_stmts.len() as i32;
         if total_num_cases > 0 {
             let (if_stmt_id, end_if_stmt_id, scope_id) = transform_select_case_code(ctx, self.current_procedure_id, id, 0, select_variable_id, select_variable_type);
             link_existing_child_to_new_parent_scope(ctx, &mut self.scope_buffer, outer_scope_id, scope_id, relative_pos);
             let first_end_if_stmt = &mut ctx.heap[end_if_stmt_id];
             first_end_if_stmt.next = outer_end_block_id.upcast();
 
src/protocol/parser/pass_validation_linking.rs
@@ -1290,17 +1290,17 @@ impl Visitor for PassValidationLinking {
                             // the check above we know we're directly at the
                             // LHS of the binding expression
                             debug_assert_eq!(_binding_expr.this, self.in_binding_expr);
                             debug_assert_eq!(idx, 0);
                             true
                         }
-                        Expression::Literal(lit_expr) => {
+                        Expression::Literal(_lit_expr) => {
                             // Only struct, unions, tuples and arrays can
                             // have subexpressions, so we're always fine
                             dbg_code!({
-                                match lit_expr.value {
+                                match _lit_expr.value {
                                     Literal::Struct(_) | Literal::Union(_) | Literal::Array(_) | Literal::Tuple(_) => {},
                                     _ => unreachable!(),
                                 }
                             });
 
                             true
src/protocol/parser/type_table.rs
@@ -54,12 +54,13 @@ use crate::protocol::parser::*;
 /// have any polymorphic arguments then it will not have any monomorphs and
 /// `is_polymorph` will be set to `false`. A type with polymorphic arguments
 /// only has `is_polymorph` set to `true` if the polymorphic arguments actually
 /// appear in the types associated types (function return argument, struct
 /// field, enum variant, etc.). Otherwise the polymorphic argument is just a
 /// marker and does not influence the bytesize of the type.
+#[allow(unused)]
 pub struct DefinedType {
     pub(crate) ast_root: RootId,
     pub(crate) ast_definition: DefinitionId,
     pub(crate) definition: DefinedTypeVariant,
     pub(crate) poly_vars: Vec<PolymorphicVariable>,
     pub(crate) is_polymorph: bool,
@@ -1452,14 +1453,12 @@ impl TypeTable {
     /// return `PushBreadcrumb` then call `handle_new_breadcrumb_for_type_loops`
     /// to take care of storing the appropriate types.
     fn check_member_for_type_loops(
         breadcrumbs: &[TypeLoopBreadcrumb], definition_map: &DefinitionMap, mono_type_map: &MonoTypeMap,
         mono_key: &mut MonoSearchKey, concrete_type: &ConcreteType
     ) -> TypeLoopResult {
-        use ConcreteTypePart as CTP;
-
         // Depending on the type, lookup if the type has already been visited
         // (i.e. either already has its memory layed out, or is part of a type
         // loop because we've already visited the type)
         debug_assert!(!concrete_type.parts.is_empty());
         let definition_id = if let ConcreteTypePart::Instance(definition_id, _) = concrete_type.parts[0] {
             definition_id
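
Note on the `is_polymorph` rule in the doc comment above: the flag is only set when a polymorphic argument actually influences the byte size of the type. A rough Rust analogy (the real check runs over definitions in the compiled protocol language; the type names below are made up for illustration only):

// `T` appears in a field, so `Holder<u32>` and `Holder<u64>` have different
// sizes: the analogous DefinedType would get `is_polymorph == true`.
struct Holder<T> {
    value: T,
}

// `T` is only a marker and never affects layout: the analogous DefinedType
// would get `is_polymorph == false`, even though it has a polymorphic
// argument. (PhantomData is only needed because Rust, unlike the protocol
// language, rejects unused type parameters.)
struct Tagged<T> {
    id: u32,
    _marker: std::marker::PhantomData<T>,
}

fn main() {
    assert_ne!(
        std::mem::size_of::<Holder<u32>>(),
        std::mem::size_of::<Holder<u64>>()
    );
    assert_eq!(
        std::mem::size_of::<Tagged<u32>>(),
        std::mem::size_of::<Tagged<u64>>()
    );
}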
src/runtime/scheduler.rs
@@ -407,17 +407,17 @@ impl Scheduler {
             if should_wake_up_again {
                 self.runtime.push_work(connector_key)
             }
         }
     }
 
-    fn debug(&self, message: &str) {
+    fn debug(&self, _message: &str) {
         // println!("DEBUG [thrd:{:02} conn:  ]: {}", self.scheduler_id, message);
     }
 
-    fn debug_conn(&self, conn: ConnectorId, message: &str) {
+    fn debug_conn(&self, _conn: ConnectorId, _message: &str) {
         // println!("DEBUG [thrd:{:02} conn:{:02}]: {}", self.scheduler_id, conn.index, message);
     }
 }
 
 // -----------------------------------------------------------------------------
 // ComponentCtx
src/runtime2/component/component_context.rs
@@ -55,21 +55,21 @@ impl CompCtx {
         self.ports.push(Port{
             self_id: putter_id,
             peer_port_id: getter_id,
             kind: PortKind::Putter,
             state: PortState::Open,
             peer_comp_id: self.id,
-            associated_with_peer: false,
+            #[cfg(debug_assertions)] associated_with_peer: false,
         });
         self.ports.push(Port{
             self_id: getter_id,
             peer_port_id: putter_id,
             kind: PortKind::Getter,
             state: PortState::Open,
             peer_comp_id: self.id,
-            associated_with_peer: false,
+            #[cfg(debug_assertions)] associated_with_peer: false,
         });
 
         return Channel{ putter_id, getter_id };
     }
 
     /// Adds a new port. Make sure to call `add_peer` afterwards.
@@ -83,25 +83,25 @@ impl CompCtx {
     }
 
     /// Removes a port. Make sure you called `remove_peer` first.
     pub(crate) fn remove_port(&mut self, port_handle: LocalPortHandle) -> Port {
         let port_index = self.must_get_port_index(port_handle);
         let port = self.ports.remove(port_index);
-        debug_assert!(!port.associated_with_peer);
+        dbg_code!(assert!(!port.associated_with_peer));
         return port;
     }
 
     /// Adds a new peer. This must be called for every port, no matter the
     /// component the channel is connected to. If a `CompHandle` is supplied,
     /// then it will be used to add the peer. Otherwise it will be retrieved
     /// from the runtime using its ID.
     pub(crate) fn add_peer(&mut self, port_handle: LocalPortHandle, sched_ctx: &SchedulerCtx, peer_comp_id: CompId, handle: Option<&CompHandle>) {
         let self_id = self.id;
         let port = self.get_port_mut(port_handle);
         debug_assert_eq!(port.peer_comp_id, peer_comp_id);
-        debug_assert!(!port.associated_with_peer);
+        dbg_code!(assert!(!port.associated_with_peer));
         if !Self::requires_peer_reference(port, self_id, false) {
             return;
         }
 
         dbg_code!(port.associated_with_peer = true);
         match self.get_peer_index_by_id(peer_comp_id) {
@@ -129,13 +129,13 @@ impl CompCtx {
         let port = self.get_port_mut(port_handle);
         debug_assert_eq!(port.peer_comp_id, peer_id);
         if !Self::requires_peer_reference(port, self_id, also_remove_if_closed) {
             return;
         }
 
-        debug_assert!(port.associated_with_peer);
+        dbg_code!(assert!(port.associated_with_peer));
         dbg_code!(port.associated_with_peer = false);
         let peer_index = self.get_peer_index_by_id(peer_id).unwrap();
         let peer = &mut self.peers[peer_index];
         peer.num_associated_ports -= 1;
         if peer.num_associated_ports == 0 {
             let mut peer = self.peers.remove(peer_index);
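
Note on the recurring pattern above: `associated_with_peer` only exists on `Port` when debug assertions are enabled, so its initializer needs a `#[cfg(debug_assertions)]` attribute and every assertion that touches it has to go through `dbg_code!` instead of `debug_assert!`, whose argument is still type-checked in release builds. The definition of `dbg_code!` is not part of this diff; the following is a minimal sketch of how such a field/macro pair could fit together, with the macro body and names assumed for illustration only:

// Debug-only field plus a macro that compiles its argument only in debug
// builds. The real `Port` and `dbg_code!` live elsewhere in this crate.
#[cfg(debug_assertions)]
macro_rules! dbg_code {
    ($($code:tt)*) => { $($code)* };
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_code {
    ($($code:tt)*) => {};
}

struct Port {
    self_id: u32,
    // The field only exists when debug assertions are enabled, so release
    // builds pay no cost for it and must never name it.
    #[cfg(debug_assertions)]
    associated_with_peer: bool,
}

fn make_port(self_id: u32) -> Port {
    Port {
        self_id,
        // The initializer is compiled out together with the field.
        #[cfg(debug_assertions)]
        associated_with_peer: false,
    }
}

fn mark_associated(port: &mut Port) -> u32 {
    // Both lines vanish entirely in release builds. A plain `debug_assert!`
    // would not be enough: its argument is still type-checked in release
    // builds, where the field does not exist.
    dbg_code!(assert!(!port.associated_with_peer));
    dbg_code!(port.associated_with_peer = true);
    port.self_id
}

fn main() {
    let mut p = make_port(1);
    println!("port {} associated with peer", mark_associated(&mut p));
}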
src/runtime2/runtime.rs
@@ -106,13 +106,13 @@ impl CompHandle {
         let old_count = self.num_handles.fetch_add(1, Ordering::AcqRel);
         debug_assert!(old_count > 0); // because we should never be able to retrieve a handle when the component is (being) destroyed
     }
 
     /// Returns the `CompKey` to the component if it should be destroyed
     pub(crate) fn decrement_users(&mut self) -> Option<CompKey> {
-        debug_assert!(!self.decremented, "illegal to 'decrement_users' twice");
+        dbg_code!(assert!(self.decremented, "illegal to 'decrement_users' twice"));
         let old_count = self.num_handles.fetch_sub(1, Ordering::AcqRel);
         let new_count = old_count - 1;
         dbg_code!(self.decremented = true);
         if new_count == 0 {
             return Some(unsafe{ self.id.upgrade() });
         }
@@ -120,13 +120,13 @@ impl CompHandle {
         return None;
     }
 }
 
 impl Clone for CompHandle {
     fn clone(&self) -> Self {
-        debug_assert!(!self.decremented, "illegal to clone after 'decrement_users'");
+        dbg_code!(assert!(!self.decremented, "illegal to clone after 'decrement_users'"));
         self.increment_users();
         return CompHandle{
             target: self.target,
             id: self.id,
             #[cfg(debug_assertions)] decremented: false,
         };
@@ -134,20 +134,20 @@ impl Clone for CompHandle {
     }
 }
 
 impl std::ops::Deref for CompHandle {
     type Target = CompPublic;
 
     fn deref(&self) -> &Self::Target {
-        debug_assert!(!self.decremented); // cannot access if control is relinquished
+        dbg_code!(assert!(!self.decremented)); // cannot access if control is relinquished
         return unsafe{ &*self.target };
     }
 }
 
 impl Drop for CompHandle {
     fn drop(&mut self) {
-        debug_assert!(self.decremented, "need call to 'decrement_users' before dropping");
+        dbg_code!(assert!(self.decremented, "need call to 'decrement_users' before dropping"));
     }
 }
 
 // -----------------------------------------------------------------------------
 // Runtime
 // -----------------------------------------------------------------------------
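
Note on the `CompHandle` hunks above: the handle is a manually counted reference. `increment_users` and `decrement_users` adjust a shared atomic counter, and only the caller performing the final decrement receives the key needed to destroy the component. A stripped-down sketch of that counting pattern, using plain std types rather than the actual runtime structs:

use std::sync::atomic::{AtomicU32, Ordering};

// Simplified stand-in for the shared, atomically counted state; the real
// runtime stores this inside the component and hands out raw pointers.
struct Shared {
    num_handles: AtomicU32,
}

struct Handle<'a> {
    shared: &'a Shared,
}

impl<'a> Handle<'a> {
    fn clone_handle(&self) -> Handle<'a> {
        // AcqRel mirrors the ordering used by `increment_users` above.
        let old = self.shared.num_handles.fetch_add(1, Ordering::AcqRel);
        assert!(old > 0); // a handle can never be cloned from a dead component
        Handle { shared: self.shared }
    }

    /// Returns true if the caller performed the final decrement and is now
    /// responsible for destroying the component.
    fn decrement_users(self) -> bool {
        let old = self.shared.num_handles.fetch_sub(1, Ordering::AcqRel);
        old - 1 == 0
    }
}

fn main() {
    let shared = Shared { num_handles: AtomicU32::new(1) };
    let a = Handle { shared: &shared };
    let b = a.clone_handle();
    assert!(!a.decrement_users()); // still one handle left
    assert!(b.decrement_users());  // last one out destroys the component
}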
src/runtime2/scheduler.rs
@@ -30,19 +30,12 @@ impl<'a> SchedulerCtx<'a> {
 
     pub(crate) fn log(&self, text: &str) {
         if self.logging_enabled {
             println!("[s:{:02}, c:{:03}] {}", self.id, self.comp, text);
         }
     }
-
-    // TODO: Obviously remove, but useful for testing
-    pub(crate) fn log_special(&self, text: &str) {
-        if self.logging_enabled {
-            println!("[s:{:02}, c:{:03}] *** *** {}", self.id, self.comp, text);
-        }
-    }
 }
 
 impl Scheduler {
     // public interface to thread
 
     pub fn new(runtime: Arc<RuntimeInner>, scheduler_id: u32, debug_logging: bool) -> Self {
src/runtime2/store/component.rs
@@ -83,13 +83,13 @@ impl ComponentReservation {
         }
     }
 }
 
 impl Drop for ComponentReservation {
     fn drop(&mut self) {
-        debug_assert!(self.submitted);
+        dbg_code!( assert!(self.submitted) );
     }
 }
 
 impl<T: Sized> ComponentStore<T> {
     pub fn new(initial_size: usize) -> Self {
         Self::assert_valid_size(initial_size);
@@ -184,18 +184,21 @@ impl<T: Sized> ComponentStore<T> {
                 shared_lock = self.reallocate(current_size, shared_lock);
                 continue 'attempt_read;
             }
 
             loop {
                 let preemptive_read = shared_lock.freelist[read_index & shared_lock.index_mask];
-                if let Err(actual_read_index) = self.read_head.compare_exchange(
+                if let Err(_actual_read_index) = self.read_head.compare_exchange(
                     read_index, (read_index + 1) & shared_lock.compare_mask,
                     Ordering::AcqRel, Ordering::Acquire
                 ) {
                     // We need to try again
-                    read_index = actual_read_index;
+                    // TODO: Fix this update loop at some point. When update
+                    //  loop is disabled, popping the freelist index is not
+                    //  reliable.
+                    // read_index = actual_read_index;
                     continue 'attempt_read;
                 }
 
                 // If here then we performed the read
                 return (shared_lock, preemptive_read);
             }
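
Note on the last hunk above: the freelist pop is a compare-exchange loop on `read_head`. The commit disables the shortcut of retrying with the index returned by a failed `compare_exchange` (see the TODO it adds) and instead restarts the whole read attempt, which presumably re-reads the head and re-checks the surrounding state. A generic sketch of such a loop, independent of this store's index masking and reallocation logic:

use std::sync::atomic::{AtomicUsize, Ordering};

// Generic stand-in for popping the freelist read head. The real store masks
// the index and may reallocate the freelist, which is why the commit prefers
// restarting the whole attempt over reusing the value returned by a failed
// compare_exchange.
fn pop_index(read_head: &AtomicUsize, len: usize) -> usize {
    loop {
        // Re-read the head on every attempt...
        let read_index = read_head.load(Ordering::Acquire);
        let next = (read_index + 1) % len;
        match read_head.compare_exchange(
            read_index, next, Ordering::AcqRel, Ordering::Acquire,
        ) {
            // ...and only return once our claim on `read_index` succeeded.
            Ok(_) => return read_index,
            // On failure another thread won the race. The disabled shortcut
            // would reuse the returned value for the next exchange; the
            // variant used here (and in the commit) retries from the load.
            Err(_actual) => continue,
        }
    }
}

fn main() {
    let head = AtomicUsize::new(0);
    assert_eq!(pop_index(&head, 8), 0);
    assert_eq!(pop_index(&head, 8), 1);
}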