use std::collections::VecDeque;

use super::value::*;
use super::store::*;
use super::error::*;
use crate::protocol::*;
use crate::protocol::ast::*;
use crate::protocol::type_table::*;

macro_rules! debug_enabled { () => { false }; }
macro_rules! debug_log {
    ($format:literal) => {
        enabled_debug_print!(false, "exec", $format);
    };
    ($format:literal, $($args:expr),*) => {
        enabled_debug_print!(false, "exec", $format, $($args),*);
    };
}

#[derive(Debug, Clone)]
pub(crate) enum ExprInstruction {
    EvalExpr(ExpressionId),
    PushValToFront,
}

#[derive(Debug, Clone)]
pub(crate) struct Frame {
    pub(crate) definition: ProcedureDefinitionId,
    pub(crate) monomorph_index: usize,
    pub(crate) position: StatementId,
    pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
    pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
    pub(crate) max_stack_size: u32,
}

impl Frame {
    /// Creates a new execution frame. Does not modify the stack in any way.
    pub fn new(heap: &Heap, definition_id: ProcedureDefinitionId, _monomorph_type_id: TypeId, monomorph_index: u32) -> Self {
        let definition = &heap[definition_id];
        let outer_scope_id = definition.scope;
        let first_statement_id = definition.body;

        // Another not-so-pretty thing that has to be replaced somewhere in
        // the future...
        fn determine_max_stack_size(heap: &Heap, scope_id: ScopeId, max_size: &mut u32) {
            let scope = &heap[scope_id];

            // Check current block
            let cur_size = scope.next_unique_id_in_scope as u32;
            if cur_size > *max_size {
                *max_size = cur_size;
            }

            // And child blocks
            for child_scope in &scope.nested {
                determine_max_stack_size(heap, *child_scope, max_size);
            }
        }

        let mut max_stack_size = 0;
        determine_max_stack_size(heap, outer_scope_id, &mut max_stack_size);

        Frame {
            definition: definition_id,
            monomorph_index: monomorph_index as usize,
            position: first_statement_id.upcast(),
            expr_stack: VecDeque::with_capacity(128),
            expr_values: VecDeque::with_capacity(128),
            max_stack_size,
        }
    }

    /// Prepares a single expression for execution. This involves walking the
    /// expression tree and putting the nodes in the `expr_stack` such that
    /// continuously popping from its back will evaluate the expression. The
    /// result of each expression is stored by pushing onto `expr_values`.
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
        debug_assert!(self.expr_stack.is_empty());
        self.expr_values.clear(); // may not be empty if the last expression result(s) were discarded

        self.serialize_expression(heap, expr_id);
    }

    /// Prepares multiple expressions for execution (i.e. evaluating all
    /// function arguments or all elements of an array/union literal). Per
    /// expression this works the same as `prepare_single_expression`. However,
    /// after each expression is evaluated we insert a `PushValToFront`
    /// instruction.
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
        debug_assert!(self.expr_stack.is_empty());
        self.expr_values.clear();

        for expr_id in expr_ids {
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
            self.serialize_expression(heap, *expr_id);
        }
    }
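    // Illustrative note (not in the original source): for a call `f(x, y)`
    // with simple arguments, `prepare_multiple_expressions` produces the
    // instruction stack (back = popped first):
    //
    //     [PushValToFront, Eval(x), PushValToFront, Eval(y)]
    //
    // Popping from the back evaluates `y` first, pushing its value onto the
    // back of `expr_values`; each `PushValToFront` then rotates the most
    // recently computed value to the front. After `x` is evaluated and
    // rotated, the deque reads `[x, y]` from front to back, i.e. the
    // arguments end up at the front in definition order.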
    /// Performs a depth-first serialization of the expression tree. Let's not
    /// care about performance for a temporary runtime implementation.
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));

        match &heap[id] {
            Expression::Assignment(expr) => {
                self.serialize_expression(heap, expr.left);
                self.serialize_expression(heap, expr.right);
            },
            Expression::Binding(expr) => {
                self.serialize_expression(heap, expr.bound_to);
                self.serialize_expression(heap, expr.bound_from);
            },
            Expression::Conditional(expr) => {
                self.serialize_expression(heap, expr.test);
            },
            Expression::Binary(expr) => {
                self.serialize_expression(heap, expr.left);
                self.serialize_expression(heap, expr.right);
            },
            Expression::Unary(expr) => {
                self.serialize_expression(heap, expr.expression);
            },
            Expression::Indexing(expr) => {
                self.serialize_expression(heap, expr.index);
                self.serialize_expression(heap, expr.subject);
            },
            Expression::Slicing(expr) => {
                self.serialize_expression(heap, expr.from_index);
                self.serialize_expression(heap, expr.to_index);
                self.serialize_expression(heap, expr.subject);
            },
            Expression::Select(expr) => {
                self.serialize_expression(heap, expr.subject);
            },
            Expression::Literal(expr) => {
                // Here we only care about literals that have subexpressions
                match &expr.value {
                    Literal::Null | Literal::True | Literal::False |
                    Literal::Character(_) | Literal::Bytestring(_) |
                    Literal::String(_) | Literal::Integer(_) | Literal::Enum(_) => {
                        // No subexpressions
                    },
                    Literal::Struct(literal) => {
                        // Note: field expressions are evaluated in programmer-
                        // specified order, but struct construction expects them
                        // in type-defined order. I might want to come back to
                        // this.
                        let mut _num_pushed = 0;
                        for want_field_idx in 0..literal.fields.len() {
                            for field in &literal.fields {
                                if field.field_idx == want_field_idx {
                                    _num_pushed += 1;
                                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
                                    self.serialize_expression(heap, field.value);
                                }
                            }
                        }
                        debug_assert_eq!(_num_pushed, literal.fields.len());
                    },
                    Literal::Union(literal) => {
                        for value_expr_id in &literal.values {
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
                            self.serialize_expression(heap, *value_expr_id);
                        }
                    },
                    Literal::Array(value_expr_ids) => {
                        for value_expr_id in value_expr_ids {
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
                            self.serialize_expression(heap, *value_expr_id);
                        }
                    },
                    Literal::Tuple(value_expr_ids) => {
                        for value_expr_id in value_expr_ids {
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
                            self.serialize_expression(heap, *value_expr_id);
                        }
                    },
                }
            },
            Expression::Cast(expr) => {
                self.serialize_expression(heap, expr.subject);
            },
            Expression::Call(expr) => {
                for arg_expr_id in &expr.arguments {
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
                    self.serialize_expression(heap, *arg_expr_id);
                }
            },
            Expression::Variable(_expr) => {
                // No subexpressions
            },
        }
    }
}

pub type EvalResult = Result<EvalContinuation, EvalError>;

#[derive(Debug)]
pub enum EvalContinuation {
    // Returned in both sync and non-sync modes
    Stepping,
    // Returned only in sync mode
    BranchInconsistent,
    SyncBlockEnd,
    NewFork,
    BlockFires(PortId),
    BlockGet(PortId),
    Put(PortId, ValueGroup),
    SelectStart(u32, u32), // (num_cases, num_ports_total)
    SelectRegisterPort(u32, u32, PortId), // (case_index, port_index_in_case, port_id)
    SelectWait, // wait until select can continue
    // Returned only in non-sync mode
    ComponentTerminated,
    SyncBlockStart,
    NewComponent(ProcedureDefinitionId, TypeId, ValueGroup),
    NewChannel,
}
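// Hypothetical usage sketch (not part of the original source): a runtime
// scheduler would repeatedly call `Prompt::step` and dispatch on the returned
// continuation, along the lines of:
//
//     loop {
//         match prompt.step(types, heap, modules, &mut ctx)? {
//             EvalContinuation::Stepping => continue,
//             EvalContinuation::ComponentTerminated => break,
//             EvalContinuation::Put(port_id, values) => {
//                 // hand the message to the runtime, then call `step` again
//             },
//             other => {
//                 // suspend; resume `step` once the runtime has resolved the
//                 // blocking condition (get/fires/fork/channel/select)
//             },
//         }
//     }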
// Note: cloning is fine, methinks. If we clone all values and the heap
// regions, then we end up with valid "pointers" to the cloned heap regions.
#[derive(Debug, Clone)]
pub struct Prompt {
    pub(crate) frames: Vec<Frame>,
    pub(crate) store: Store,
}

impl Prompt {
    pub fn new(types: &TypeTable, heap: &Heap, def: ProcedureDefinitionId, type_id: TypeId, args: ValueGroup) -> Self {
        let mut prompt = Self {
            frames: Vec::new(),
            store: Store::new(),
        };

        // Maybe do typechecking in the future?
        let monomorph_index = types.get_monomorph(type_id).variant.as_procedure().monomorph_index;
        let new_frame = Frame::new(heap, def, type_id, monomorph_index);
        let max_stack_size = new_frame.max_stack_size;
        prompt.frames.push(new_frame);
        args.into_store(&mut prompt.store);
        prompt.store.reserve_stack(max_stack_size);

        prompt
    }

    /// Big 'ol function right here. Didn't want to break it up unnecessarily.
    /// It consists of, in sequence: executing any expressions that should be
    /// executed before the next statement can be evaluated, then a section
    /// that performs debug printing, and finally a section that takes the next
    /// statement and executes it. If the statement requires any expressions to
    /// be evaluated, then they will be added such that the next time `step` is
    /// called, all of these expressions are indeed evaluated.
    pub(crate) fn step(&mut self, types: &TypeTable, heap: &Heap, modules: &[Module], ctx: &mut impl RunContext) -> EvalResult {
        // Helper function to transfer multiple values from the expression
        // value array into a heap region (e.g. constructing arrays or structs).
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
            let heap_pos = store.alloc_heap();

            // Do the ownership transfer first (split into two loops to appease
            // the borrow checker)
            for val_idx in 0..num_values {
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
            }

            // And now transfer to the heap region
            let values = &mut store.heap_regions[heap_pos as usize].values;
            debug_assert!(values.is_empty());
            values.reserve(num_values);
            for _ in 0..num_values {
                values.push(cur_frame.expr_values.pop_front().unwrap());
            }

            heap_pos
        }
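        // Note (added for clarity, not in the original source): the values
        // read by the helper above sit at the front of `expr_values` in
        // definition order, which is exactly the ordering that the
        // `PushValToFront` instructions established during preparation.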
        // Helper functions to check that an index into an array is valid.
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
            return idx < 0 || idx >= array_len as i64;
        }

        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
            return idx < 0 || idx > array_len as i64;
        }

        fn construct_array_error(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, heap_pos: u32, idx: i64) -> EvalError {
            let array_len = prompt.store.heap_regions[heap_pos as usize].values.len();
            return EvalError::new_error_at_expr(
                prompt, modules, heap, expr_id,
                format!("index {} is out of bounds: array length is {}", idx, array_len)
            );
        }
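        // Illustrative note (not in the original source): the "inclusive"
        // check guards element access, where valid positions for a length-3
        // array are 0, 1 and 2. The "exclusive" check guards a slice's end
        // bound, which may equal the length: `a[0..3]` is valid for that same
        // array, so only an end bound greater than 3 is rejected.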
        // Check if we're at the end of execution
        let cur_frame = self.frames.last_mut().unwrap();
        if cur_frame.position.is_invalid() {
            if heap[cur_frame.definition].kind == ProcedureKind::Function {
                todo!("End of function without return, return an evaluation error");
            }
            return Ok(EvalContinuation::ComponentTerminated);
        }

        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier.value.as_str());

        // Execute all pending expressions
        while !cur_frame.expr_stack.is_empty() {
            let next = cur_frame.expr_stack.pop_back().unwrap();
            debug_log!("Expr stack: {:?}", next);

            match next {
                ExprInstruction::PushValToFront => {
                    cur_frame.expr_values.rotate_right(1);
                },
                ExprInstruction::EvalExpr(expr_id) => {
                    let expr = &heap[expr_id];
                    match expr {
                        Expression::Assignment(expr) => {
                            let to = cur_frame.expr_values.pop_back().unwrap().as_ref();
                            let rhs = cur_frame.expr_values.pop_back().unwrap();

                            // Note: although not pretty, the assignment operator takes
                            // ownership of the right-hand side value when possible. So we
                            // do not drop the rhs's optionally owned heap data.
                            let rhs = self.store.read_take_ownership(rhs);
                            apply_assignment_operator(&mut self.store, to, expr.operation, rhs);
                        },
                        Expression::Binding(_expr) => {
                            let bind_to = cur_frame.expr_values.pop_back().unwrap();
                            let bind_from = cur_frame.expr_values.pop_back().unwrap();

                            let bind_to_heap_pos = bind_to.get_heap_pos();
                            let bind_from_heap_pos = bind_from.get_heap_pos();

                            let result = apply_binding_operator(&mut self.store, bind_to, bind_from);
                            self.store.drop_value(bind_to_heap_pos);
                            self.store.drop_value(bind_from_heap_pos);

                            cur_frame.expr_values.push_back(Value::Bool(result));
                        },
                        Expression::Conditional(expr) => {
                            // Evaluate testing expression, then extend the expression
                            // stack with the appropriate expression
                            let test_result = cur_frame.expr_values.pop_back().unwrap().as_bool();
                            if test_result {
                                cur_frame.serialize_expression(heap, expr.true_expression);
                            } else {
                                cur_frame.serialize_expression(heap, expr.false_expression);
                            }
                        },
                        Expression::Binary(expr) => {
                            let lhs = cur_frame.expr_values.pop_back().unwrap();
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
                            let result = apply_binary_operator(&mut self.store, &lhs, expr.operation, &rhs);
                            cur_frame.expr_values.push_back(result);
                            self.store.drop_value(lhs.get_heap_pos());
                            self.store.drop_value(rhs.get_heap_pos());
                        },
                        Expression::Unary(expr) => {
                            let val = cur_frame.expr_values.pop_back().unwrap();
                            let result = apply_unary_operator(&mut self.store, expr.operation, &val);
                            cur_frame.expr_values.push_back(result);
                            self.store.drop_value(val.get_heap_pos());
                        },
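                        // Note (added for clarity, not in the original source): the
                        // indexing and select arms below distinguish two cases. If the
                        // popped subject is a `Value::Ref` it points at live stack/heap
                        // storage, so a new reference is pushed and nothing is
                        // deallocated. Otherwise the subject is a temporary owned by the
                        // expression stack: the selected element is cloned out and the
                        // temporary's heap region is dropped afterwards.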
                        Expression::Indexing(_expr) => {
                            // Evaluate index. Never heap allocated, so we do not have to
                            // drop it.
                            let index = cur_frame.expr_values.pop_back().unwrap();
                            let index = self.store.maybe_read_ref(&index);
                            debug_assert!(index.is_integer());
                            let index = if index.is_signed_integer() {
                                index.as_signed_integer() as i64
                            } else {
                                index.as_unsigned_integer() as i64
                            };

                            let subject = cur_frame.expr_values.pop_back().unwrap();
                            let (deallocate_heap_pos, value_to_push) = match subject {
                                Value::Ref(value_ref) => {
                                    // Our expression stack value is a reference to something
                                    // that exists in the normal stack/heap. We don't want to
                                    // deallocate this thing. Rather we want to return a
                                    // reference to it.
                                    let subject = self.store.read_ref(value_ref);
                                    let subject_heap_pos = match subject {
                                        Value::String(v) => *v,
                                        Value::Array(v) => *v,
                                        Value::Message(v) => *v,
                                        _ => unreachable!(),
                                    };

                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
                                    }

                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, index as u32)))
                                },
                                _ => {
                                    // Our value lives on the expression stack, hence we need
                                    // to clone whatever we're referring to. Then drop the
                                    // subject.
                                    let subject_heap_pos = match &subject {
                                        Value::String(v) => *v,
                                        Value::Array(v) => *v,
                                        Value::Message(v) => *v,
                                        _ => unreachable!(),
                                    };

                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
                                    }

                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, index as u32));
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
                                },
                            };

                            cur_frame.expr_values.push_back(value_to_push);
                            self.store.drop_value(deallocate_heap_pos);
                        },
                        Expression::Slicing(expr) => {
                            // Evaluate indices
                            let from_index = cur_frame.expr_values.pop_back().unwrap();
                            let from_index = self.store.maybe_read_ref(&from_index);
                            let to_index = cur_frame.expr_values.pop_back().unwrap();
                            let to_index = self.store.maybe_read_ref(&to_index);

                            debug_assert!(from_index.is_integer() && to_index.is_integer());
                            let from_index = if from_index.is_signed_integer() {
                                from_index.as_signed_integer()
                            } else {
                                from_index.as_unsigned_integer() as i64
                            };
                            let to_index = if to_index.is_signed_integer() {
                                to_index.as_signed_integer()
                            } else {
                                to_index.as_unsigned_integer() as i64
                            };

                            // Dereference subject if needed
                            let subject = cur_frame.expr_values.pop_back().unwrap();
                            let deref_subject = self.store.maybe_read_ref(&subject);

                            // Slicing needs to produce a copy anyway (with the current
                            // evaluator implementation)
                            enum ValueKind { Array, String, Message }
                            let (value_kind, array_heap_pos) = match deref_subject {
                                Value::Array(v) => (ValueKind::Array, *v),
                                Value::String(v) => (ValueKind::String, *v),
                                Value::Message(v) => (ValueKind::Message, *v),
                                _ => unreachable!(),
                            };

                            if array_inclusive_index_is_invalid(&self.store, array_heap_pos, from_index) {
                                return Err(construct_array_error(self, modules, heap, expr.from_index, array_heap_pos, from_index));
                            }
                            if array_exclusive_index_is_invalid(&self.store, array_heap_pos, to_index) {
                                return Err(construct_array_error(self, modules, heap, expr.to_index, array_heap_pos, to_index));
                            }

                            // Again: would love to push directly, but Rust...
                            let new_heap_pos = self.store.alloc_heap();
                            debug_assert!(self.store.heap_regions[new_heap_pos as usize].values.is_empty());
                            if to_index > from_index {
                                let from_index = from_index as usize;
                                let to_index = to_index as usize;
                                let mut values = Vec::with_capacity(to_index - from_index);
                                for idx in from_index..to_index {
                                    let value = self.store.heap_regions[array_heap_pos as usize].values[idx].clone();
                                    values.push(self.store.clone_value(value));
                                }
                                self.store.heap_regions[new_heap_pos as usize].values = values;
                            } // else: empty range

                            cur_frame.expr_values.push_back(match value_kind {
                                ValueKind::Array => Value::Array(new_heap_pos),
                                ValueKind::String => Value::String(new_heap_pos),
                                ValueKind::Message => Value::Message(new_heap_pos),
                            });

                            // Drop the original subject (the expression-stack value); if
                            // it was a reference we must not drop the underlying
                            // stack/heap value
                            self.store.drop_value(subject.get_heap_pos());
                        },
                        Expression::Select(expr) => {
                            let subject = cur_frame.expr_values.pop_back().unwrap();
                            let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
                            let field_idx = mono_data.expr_info[expr.type_index as usize].variant.as_select() as u32;

                            // Note: same as above: clone if the value lives on the expr
                            // stack, simply refer to it if it already lives on the
                            // stack/heap.
                            let (deallocate_heap_pos, value_to_push) = match subject {
                                Value::Ref(value_ref) => {
                                    let subject = self.store.read_ref(value_ref);
                                    let subject_heap_pos = match expr.kind {
                                        SelectKind::StructField(_) => subject.as_struct(),
                                        SelectKind::TupleMember(_) => subject.as_tuple(),
                                    };
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, field_idx)))
                                },
                                _ => {
                                    let subject_heap_pos = match expr.kind {
                                        SelectKind::StructField(_) => subject.as_struct(),
                                        SelectKind::TupleMember(_) => subject.as_tuple(),
                                    };
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, field_idx));
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
                                },
                            };

                            cur_frame.expr_values.push_back(value_to_push);
                            self.store.drop_value(deallocate_heap_pos);
                        },
                        Expression::Literal(expr) => {
                            let value = match &expr.value {
                                Literal::Null => Value::Null,
                                Literal::True => Value::Bool(true),
                                Literal::False => Value::Bool(false),
                                Literal::Character(lit_value) => Value::Char(*lit_value),
                                Literal::Bytestring(lit_value) => {
                                    let heap_pos = self.store.alloc_heap();
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
                                    debug_assert!(values.is_empty());
                                    values.reserve(lit_value.len());
                                    for byte in lit_value {
                                        values.push(Value::UInt8(*byte));
                                    }
                                    Value::Array(heap_pos)
                                },
                                Literal::String(lit_value) => {
                                    let heap_pos = self.store.alloc_heap();
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
                                    let value = lit_value.as_str();
                                    debug_assert!(values.is_empty());
                                    values.reserve(value.len());
                                    for character in value.as_bytes() {
                                        debug_assert!(character.is_ascii());
                                        values.push(Value::Char(*character as char));
                                    }
                                    Value::String(heap_pos)
                                },
                                Literal::Integer(lit_value) => {
                                    use ConcreteTypePart as CTP;
                                    let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
                                    let type_id = mono_data.expr_info[expr.type_index as usize].type_id;
                                    let concrete_type = &types.get_monomorph(type_id).concrete_type;
                                    debug_assert_eq!(concrete_type.parts.len(), 1);
                                    match concrete_type.parts[0] {
                                        CTP::UInt8 => Value::UInt8(lit_value.unsigned_value as u8),
                                        CTP::UInt16 => Value::UInt16(lit_value.unsigned_value as u16),
                                        CTP::UInt32 => Value::UInt32(lit_value.unsigned_value as u32),
                                        CTP::UInt64 => Value::UInt64(lit_value.unsigned_value as u64),
                                        CTP::SInt8 => Value::SInt8(lit_value.unsigned_value as i8),
                                        CTP::SInt16 => Value::SInt16(lit_value.unsigned_value as i16),
                                        CTP::SInt32 => Value::SInt32(lit_value.unsigned_value as i32),
                                        CTP::SInt64 => Value::SInt64(lit_value.unsigned_value as i64),
                                        _ => unreachable!("got concrete type {:?} for integer literal at expr {:?}", concrete_type, expr_id),
                                    }
                                },
                                Literal::Struct(lit_value) => {
                                    let heap_pos = transfer_expression_values_front_into_heap(
                                        cur_frame, &mut self.store, lit_value.fields.len()
                                    );
                                    Value::Struct(heap_pos)
                                },
                                Literal::Enum(lit_value) => {
                                    Value::Enum(lit_value.variant_idx as i64)
                                },
                                Literal::Union(lit_value) => {
                                    let heap_pos = transfer_expression_values_front_into_heap(
                                        cur_frame, &mut self.store, lit_value.values.len()
                                    );
                                    Value::Union(lit_value.variant_idx as i64, heap_pos)
                                },
                                Literal::Array(lit_value) => {
                                    let heap_pos = transfer_expression_values_front_into_heap(
                                        cur_frame, &mut self.store, lit_value.len()
                                    );
                                    Value::Array(heap_pos)
                                },
                                Literal::Tuple(lit_value) => {
                                    let heap_pos = transfer_expression_values_front_into_heap(
                                        cur_frame, &mut self.store, lit_value.len()
                                    );
                                    Value::Tuple(heap_pos)
                                },
                            };

                            cur_frame.expr_values.push_back(value);
                        },
                        Expression::Cast(expr) => {
                            let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
                            let type_id = mono_data.expr_info[expr.type_index as usize].type_id;
                            let concrete_type = &types.get_monomorph(type_id).concrete_type;

                            // Typechecking reduced this to two cases: either we have a
                            // casting noop (same types), or we're casting between
                            // integer/bool/char types.
                            let subject = cur_frame.expr_values.pop_back().unwrap();
                            match apply_casting(&mut self.store, concrete_type, &subject) {
                                Ok(value) => cur_frame.expr_values.push_back(value),
                                Err(msg) => {
                                    return Err(EvalError::new_error_at_expr(self, modules, heap, expr.this.upcast(), msg));
                                },
                            }
                            self.store.drop_value(subject.get_heap_pos());
                        },
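                        // Note (added for clarity, not in the original source): the
                        // blocking builtins below (`get`, `put`, `fires`, select wait)
                        // all follow the same suspend/resume pattern. Arguments are
                        // popped from the front of `expr_values`; if the runtime cannot
                        // complete the operation yet, the arguments are pushed back, the
                        // `EvalExpr` instruction is re-pushed onto `expr_stack`, and a
                        // continuation is returned. The next call to `step` then
                        // re-evaluates the same call expression.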
                        Expression::Call(expr) => {
                            // If we're dealing with a builtin we don't do any fancy
                            // shenanigans at all, just push the result.
                            match expr.method {
                                Method::Get => {
                                    let value = cur_frame.expr_values.pop_front().unwrap();
                                    let value = self.store.maybe_read_ref(&value).clone();
                                    let port_id = if let Value::Input(port_id) = value {
                                        port_id
                                    } else {
                                        unreachable!("executor calling 'get' on value {:?}", value)
                                    };

                                    match ctx.performed_get(port_id) {
                                        Some(result) => {
                                            // We have the result. Merge the `ValueGroup` with
                                            // the stack/heap storage.
                                            debug_assert_eq!(result.values.len(), 1);
                                            result.into_stack(&mut cur_frame.expr_values, &mut self.store);
                                        },
                                        None => {
                                            // Don't have the result yet, prepare the expression
                                            // to get run again after we've received a message.
                                            cur_frame.expr_values.push_front(value.clone());
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
                                            return Ok(EvalContinuation::BlockGet(port_id));
                                        },
                                    }
                                },
                                Method::Put => {
                                    let port_value = cur_frame.expr_values.pop_front().unwrap();
                                    let deref_port_value = self.store.maybe_read_ref(&port_value).clone();
                                    let port_id = if let Value::Output(port_id) = deref_port_value {
                                        port_id
                                    } else {
                                        unreachable!("executor calling 'put' on value {:?}", deref_port_value)
                                    };

                                    let msg_value = cur_frame.expr_values.pop_front().unwrap();
                                    let deref_msg_value = self.store.maybe_read_ref(&msg_value).clone();

                                    if ctx.performed_put(port_id) {
                                        // We're fine, deallocate in case the expression value
                                        // stack held an owned value
                                        self.store.drop_value(msg_value.get_heap_pos());
                                    } else {
                                        // Prepare to execute again
                                        cur_frame.expr_values.push_front(msg_value);
                                        cur_frame.expr_values.push_front(port_value);
                                        cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
                                        let value_group = ValueGroup::from_store(&self.store, &[deref_msg_value]);
                                        return Ok(EvalContinuation::Put(port_id, value_group));
                                    }
                                },
                                Method::Fires => {
                                    let port_value = cur_frame.expr_values.pop_front().unwrap();
                                    let port_value_deref = self.store.maybe_read_ref(&port_value).clone();
                                    let port_id = port_value_deref.as_port_id();
                                    match ctx.fires(port_id) {
                                        None => {
                                            cur_frame.expr_values.push_front(port_value);
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr_id));
                                            return Ok(EvalContinuation::BlockFires(port_id));
                                        },
                                        Some(value) => {
                                            cur_frame.expr_values.push_back(value);
                                        },
                                    }
                                },
                                Method::Create => {
                                    let length_value = cur_frame.expr_values.pop_front().unwrap();
                                    let length_value = self.store.maybe_read_ref(&length_value);
                                    let length = if length_value.is_signed_integer() {
                                        let length_value = length_value.as_signed_integer();
                                        if length_value < 0 {
                                            return Err(EvalError::new_error_at_expr(
                                                self, modules, heap, expr_id,
                                                format!("got length '{}', can only create a message with a non-negative length", length_value)
                                            ));
                                        }
                                        length_value as u64
                                    } else {
                                        debug_assert!(length_value.is_unsigned_integer());
                                        length_value.as_unsigned_integer()
                                    };

                                    let heap_pos = self.store.alloc_heap();
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
                                    debug_assert!(values.is_empty());
                                    values.resize(length as usize, Value::UInt8(0));
                                    cur_frame.expr_values.push_back(Value::Message(heap_pos));
                                },
                                Method::Length => {
                                    let value = cur_frame.expr_values.pop_front().unwrap();
                                    let value_heap_pos = value.get_heap_pos();
                                    let value = self.store.maybe_read_ref(&value);
                                    let heap_pos = match value {
                                        Value::Array(pos) => *pos,
                                        Value::String(pos) => *pos,
                                        _ => unreachable!("length(...) on {:?}", value),
                                    };
                                    let len = self.store.heap_regions[heap_pos as usize].values.len();
                                    // TODO: @PtrInt
                                    cur_frame.expr_values.push_back(Value::UInt32(len as u32));
                                    self.store.drop_value(value_heap_pos);
                                },
                                Method::Assert => {
                                    let value = cur_frame.expr_values.pop_front().unwrap();
                                    let value = self.store.maybe_read_ref(&value).clone();
                                    if !value.as_bool() {
                                        return Ok(EvalContinuation::BranchInconsistent);
                                    }
                                },
                                Method::Print => {
                                    // Convert the runtime variant of a string into an actual
                                    // string.
                                    let value = cur_frame.expr_values.pop_front().unwrap();
                                    let is_literal_string = value.get_heap_pos().is_some();
                                    let value = self.store.maybe_read_ref(&value);
                                    let value_heap_pos = value.as_string();

                                    let elements = &self.store.heap_regions[value_heap_pos as usize].values;
                                    let mut message = String::with_capacity(elements.len());
                                    for element in elements {
                                        message.push(element.as_char());
                                    }

                                    // Drop the heap-allocated value from the store
                                    if is_literal_string {
                                        self.store.drop_heap_pos(value_heap_pos);
                                    }

                                    println!("{}", message);
                                },
                                Method::SelectStart => {
                                    let num_cases = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
                                    let num_ports = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
                                    return Ok(EvalContinuation::SelectStart(num_cases, num_ports));
                                },
                                Method::SelectRegisterCasePort => {
                                    let case_index = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
                                    let port_index = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_uint32();
                                    let port_value = self.store.maybe_read_ref(&cur_frame.expr_values.pop_front().unwrap()).as_port_id();
                                    return Ok(EvalContinuation::SelectRegisterPort(case_index, port_index, port_value));
                                },
                                Method::SelectWait => {
                                    match ctx.performed_select_wait() {
                                        Some(select_index) => {
                                            cur_frame.expr_values.push_back(Value::UInt32(select_index));
                                        },
                                        None => {
                                            cur_frame.expr_stack.push_back(ExprInstruction::EvalExpr(expr.this.upcast()));
                                            return Ok(EvalContinuation::SelectWait);
                                        },
                                    }
                                },
                                Method::ComponentRandomU32 | Method::ComponentTcpClient => {
                                    debug_assert_eq!(heap[expr.procedure].parameters.len(), cur_frame.expr_values.len());
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this);
                                },
                                Method::UserComponent => {
                                    // This is actually handled by the evaluation of the
                                    // statement.
                                    debug_assert_eq!(heap[expr.procedure].parameters.len(), cur_frame.expr_values.len());
                                    debug_assert_eq!(heap[cur_frame.position].as_new().expression, expr.this);
                                },
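                                // Note (added for clarity, not in the original source):
                                // for a user function call the value stack is laid out as
                                //
                                //     ... caller locals ...
                                //     PrevStackBoundary(caller's boundary)  <- new boundary
                                //     arg 0, arg 1, ...                     <- callee stack
                                //     reserved slots for callee locals
                                //
                                // `Statement::Return` later truncates back to the boundary
                                // marker and restores the caller's `cur_stack_boundary`.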
                                Method::UserFunction => {
                                    // Push a new frame. Note that all expressions have been
                                    // pushed to the front, so they're in the order of the
                                    // definition.
                                    let num_args = expr.arguments.len();

                                    // Determine stack boundaries
                                    let cur_stack_boundary = self.store.cur_stack_boundary;
                                    let new_stack_boundary = self.store.stack.len();

                                    // Push new boundary and function arguments for new frame
                                    self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
                                    for _ in 0..num_args {
                                        let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
                                        self.store.stack.push(argument);
                                    }

                                    // Determine the monomorph index of the function we're
                                    // calling
                                    let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
                                    let (type_id, monomorph_index) = mono_data.expr_info[expr.type_index as usize].variant.as_procedure();

                                    // Push the new frame and reserve its stack size
                                    let new_frame = Frame::new(heap, expr.procedure, type_id, monomorph_index);
                                    let new_stack_size = new_frame.max_stack_size;
                                    self.frames.push(new_frame);
                                    self.store.cur_stack_boundary = new_stack_boundary;
                                    self.store.reserve_stack(new_stack_size);

                                    // To simplify the logic a little bit we will now return
                                    // and ask our caller to call us again
                                    return Ok(EvalContinuation::Stepping);
                                },
                            }
                        },
                        Expression::Variable(expr) => {
                            let variable = &heap[expr.declaration.unwrap()];
                            let ref_value = if expr.used_as_binding_target {
                                Value::Binding(variable.unique_id_in_scope as StackPos)
                            } else {
                                Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos))
                            };
                            cur_frame.expr_values.push_back(ref_value);
                        },
                    }
                }
            }
        }

        debug_log!("Frame [{:?}] at {:?}", cur_frame.definition, cur_frame.position);
        if debug_enabled!() {
            debug_log!("Expression value stack (size = {}):", cur_frame.expr_values.len());
            for (_stack_idx, _stack_val) in cur_frame.expr_values.iter().enumerate() {
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
            }

            debug_log!("Stack (size = {}):", self.store.stack.len());
            for (_stack_idx, _stack_val) in self.store.stack.iter().enumerate() {
                debug_log!("  [{:03}] {:?}", _stack_idx, _stack_val);
            }

            debug_log!("Heap:");
            for (_heap_idx, _heap_region) in self.store.heap_regions.iter().enumerate() {
                let _is_free = self.store.free_regions.iter().any(|idx| *idx as usize == _heap_idx);
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", _heap_idx, !_is_free, _heap_region.values.len(), &_heap_region.values);
            }
        }
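        // Note (added for clarity, not in the original source): statement
        // execution below is a flat state machine. Most arms only move
        // `cur_frame.position` to the next statement and report `Stepping`;
        // the `End*` statements additionally pop the stack slots of the scope
        // that is being left.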
        // No (more) expressions to evaluate. So evaluate the statement (which
        // may depend on the result of the last evaluated expression(s))
        let stmt = &heap[cur_frame.position];
        let return_value = match stmt {
            Statement::Block(stmt) => {
                debug_assert!(stmt.statements.is_empty() || stmt.next == stmt.statements[0]);
                cur_frame.position = stmt.next;
                Ok(EvalContinuation::Stepping)
            },
            Statement::EndBlock(stmt) => {
                let block = &heap[stmt.start_block];
                let scope = &heap[block.scope];
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
                cur_frame.position = stmt.next;
                Ok(EvalContinuation::Stepping)
            },
            Statement::Local(stmt) => {
                match stmt {
                    LocalStatement::Memory(stmt) => {
                        dbg_code!({
                            let variable = &heap[stmt.variable];
                            debug_assert!(match self.store.read_ref(ValueId::Stack(variable.unique_id_in_scope as u32)) {
                                Value::Unassigned => false,
                                _ => true,
                            });
                        });
                        cur_frame.position = stmt.next;
                        Ok(EvalContinuation::Stepping)
                    },
                    LocalStatement::Channel(stmt) => {
                        // Need to create a new channel by requesting it from
                        // the runtime.
                        match ctx.created_channel() {
                            None => {
                                // No channel is pending, so request one
                                Ok(EvalContinuation::NewChannel)
                            },
                            Some((put_port, get_port)) => {
                                self.store.write(ValueId::Stack(heap[stmt.from].unique_id_in_scope as u32), put_port);
                                self.store.write(ValueId::Stack(heap[stmt.to].unique_id_in_scope as u32), get_port);
                                cur_frame.position = stmt.next;
                                Ok(EvalContinuation::Stepping)
                            },
                        }
                    },
                }
            },
            Statement::Labeled(stmt) => {
                cur_frame.position = stmt.body;
                Ok(EvalContinuation::Stepping)
            },
            Statement::If(stmt) => {
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for if statement");
                let test_value = cur_frame.expr_values.pop_back().unwrap();
                let test_value = self.store.maybe_read_ref(&test_value).as_bool();
                if test_value {
                    cur_frame.position = stmt.true_case.body;
                } else if let Some(false_body) = stmt.false_case {
                    cur_frame.position = false_body.body;
                } else {
                    // Not true, and no false body
                    cur_frame.position = stmt.end_if.upcast();
                }
                Ok(EvalContinuation::Stepping)
            },
            Statement::EndIf(stmt) => {
                cur_frame.position = stmt.next;
                let if_stmt = &heap[stmt.start_if];
                debug_assert_eq!(
                    heap[if_stmt.true_case.scope].first_unique_id_in_scope,
                    heap[if_stmt.false_case.unwrap_or(if_stmt.true_case).scope].first_unique_id_in_scope,
                );
                let scope = &heap[if_stmt.true_case.scope];
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
                Ok(EvalContinuation::Stepping)
            },
            Statement::While(stmt) => {
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for while statement");
                let test_value = cur_frame.expr_values.pop_back().unwrap();
                let test_value = self.store.maybe_read_ref(&test_value).as_bool();
                if test_value {
                    cur_frame.position = stmt.body;
                } else {
                    cur_frame.position = stmt.end_while.upcast();
                }
                Ok(EvalContinuation::Stepping)
            },
            Statement::EndWhile(stmt) => {
                cur_frame.position = stmt.next;
                let start_while = &heap[stmt.start_while];
                let scope = &heap[start_while.scope];
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
                Ok(EvalContinuation::Stepping)
            },
            Statement::Break(stmt) => {
                cur_frame.position = stmt.target.upcast();
                Ok(EvalContinuation::Stepping)
            },
            Statement::Continue(stmt) => {
                cur_frame.position = stmt.target.upcast();
                Ok(EvalContinuation::Stepping)
            },
            Statement::Synchronous(stmt) => {
                cur_frame.position = stmt.body;
                Ok(EvalContinuation::SyncBlockStart)
            },
            Statement::EndSynchronous(stmt) => {
                cur_frame.position = stmt.next;
                let start_synchronous = &heap[stmt.start_sync];
                let scope = &heap[start_synchronous.scope];
                self.store.clear_stack(scope.first_unique_id_in_scope as usize);
                Ok(EvalContinuation::SyncBlockEnd)
            },
            Statement::Fork(stmt) => {
                if stmt.right_body.is_none() {
                    // No reason to fork
                    cur_frame.position = stmt.left_body;
                } else {
                    // Need to fork
                    if let Some(go_left) = ctx.performed_fork() {
                        // Runtime has created a fork
                        if go_left {
                            cur_frame.position = stmt.left_body;
                        } else {
                            cur_frame.position = stmt.right_body.unwrap();
                        }
                    } else {
                        // Request the runtime to create a fork of the current
                        // branch
                        return Ok(EvalContinuation::NewFork);
                    }
                }
                Ok(EvalContinuation::Stepping)
            },
            Statement::EndFork(stmt) => {
                cur_frame.position = stmt.next;
                Ok(EvalContinuation::Stepping)
            },
            Statement::Select(stmt) => {
                // This is a trampoline for the statements that were placed by
                // the AST transformation pass
                cur_frame.position = stmt.next;
                Ok(EvalContinuation::Stepping)
            },
            Statement::EndSelect(stmt) => {
                cur_frame.position = stmt.next;
                let start_select = &heap[stmt.start_select];
                if let Some(select_case) = start_select.cases.first() {
                    let scope = &heap[select_case.scope];
                    self.store.clear_stack(scope.first_unique_id_in_scope as usize);
                }
                Ok(EvalContinuation::Stepping)
            },
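            // Note (added for clarity, not in the original source): `Return`
            // below undoes the layout set up for `Method::UserFunction`: it
            // clears the callee's stack slots, truncates the stack to just
            // past the `PrevStackBoundary` marker, pops that marker to restore
            // the caller's boundary, and hands the return value to the
            // caller's expression value stack.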
            Statement::Return(_stmt) => {
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for return statement");

                // The preceding frame has executed a call, so is expecting the
                // return expression on its expression value stack. Note that
                // we may be returning a reference to something on our stack,
                // so we need to read that value and clone it.
                let return_value = cur_frame.expr_values.pop_back().unwrap();
                let return_value = match return_value {
                    Value::Ref(value_id) => self.store.read_copy(value_id),
                    _ => return_value,
                };

                // Pre-emptively pop our stack frame
                self.frames.pop();

                // Clean up our section of the stack
                self.store.clear_stack(0);
                self.store.stack.truncate(self.store.cur_stack_boundary + 1);
                let prev_stack_idx = self.store.stack.pop().unwrap().as_stack_boundary();

                // TODO: Temporary hack for testing, remove at some point
                if self.frames.is_empty() {
                    debug_assert!(prev_stack_idx == -1);
                    debug_assert!(self.store.stack.is_empty());
                    self.store.stack.push(return_value);
                    return Ok(EvalContinuation::ComponentTerminated);
                }

                debug_assert!(prev_stack_idx >= 0);

                // Return to the original state of the stack frame
                self.store.cur_stack_boundary = prev_stack_idx as usize;
                let cur_frame = self.frames.last_mut().unwrap();
                cur_frame.expr_values.push_back(return_value);

                // We just returned to the previous frame, which might be in
                // the middle of evaluating expressions for a particular
                // statement. So we don't want to enter the code below.
                return Ok(EvalContinuation::Stepping);
            },
            Statement::Goto(stmt) => {
                cur_frame.position = stmt.target.upcast();
                Ok(EvalContinuation::Stepping)
            },
            Statement::New(stmt) => {
                let call_expr = &heap[stmt.expression];
                debug_assert_eq!(
                    cur_frame.expr_values.len(), heap[call_expr.procedure].parameters.len(),
                    "mismatch in expr stack size and number of arguments for new statement"
                );
                let mono_data = &heap[cur_frame.definition].monomorphs[cur_frame.monomorph_index];
                let type_id = mono_data.expr_info[call_expr.type_index as usize].variant.as_procedure().0;

                // Note that due to expression value evaluation they exist in
                // reverse order on the stack.
                // TODO: Revise this code, keep it as is to be compatible with
                // the current runtime
                let mut args = Vec::new();
                while let Some(value) = cur_frame.expr_values.pop_front() {
                    args.push(value);
                }

                // Construct the argument group, thereby copying heap regions
                let argument_group = ValueGroup::from_store(&self.store, &args);

                // Clear any heap regions
                for arg in &args {
                    self.store.drop_value(arg.get_heap_pos());
                }

                cur_frame.position = stmt.next;

                Ok(EvalContinuation::NewComponent(call_expr.procedure, type_id, argument_group))
            },
            Statement::Expression(stmt) => {
                // The expression has just been completely evaluated. Some
                // values might have remained on the expression value stack.
                // TODO: Clear these properly:
                // cur_frame.expr_values.clear();
                cur_frame.position = stmt.next;
                Ok(EvalContinuation::Stepping)
            },
        };

        assert!(
            cur_frame.expr_values.is_empty(),
            "This is a debugging assertion that will fail if you perform expressions without \
            assigning to anything. This should be completely valid, and this assertion should be \
            replaced by something that clears the expression values if needed, but I'll keep this \
            in for now for debugging purposes."
        );
        // If the next statement requires evaluating expressions, then we push
        // these onto the expression stack. This way we will evaluate this
        // stack in the next loop, then evaluate the statement using the result
        // of the expression evaluation.
        if !cur_frame.position.is_invalid() {
            let stmt = &heap[cur_frame.position];

            match stmt {
                Statement::Local(stmt) => {
                    if let LocalStatement::Memory(stmt) = stmt {
                        // Set up as unassigned; when we execute the memory
                        // statement (after evaluating the expression), it
                        // should no longer be `Unassigned`.
                        let variable = &heap[stmt.variable];
                        self.store.write(ValueId::Stack(variable.unique_id_in_scope as u32), Value::Unassigned);
                        cur_frame.prepare_single_expression(heap, stmt.initial_expr.upcast());
                    }
                },
                Statement::If(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
                Statement::While(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
                Statement::Return(stmt) => {
                    debug_assert_eq!(stmt.expressions.len(), 1); // TODO: @ReturnValues
                    cur_frame.prepare_single_expression(heap, stmt.expressions[0]);
                },
                Statement::New(stmt) => {
                    // Note that we will end up not evaluating the call itself.
                    // Rather we will evaluate its expressions and then
                    // instantiate the component upon reaching the "new" stmt.
                    let call_expr = &heap[stmt.expression];
                    cur_frame.prepare_multiple_expressions(heap, &call_expr.arguments);
                },
                Statement::Expression(stmt) => {
                    cur_frame.prepare_single_expression(heap, stmt.expression);
                },
                _ => {},
            }
        }

        return_value
    }

    /// Constructs an error at the current expression that lives at the top of
    /// the expression stack. Currently panics if there is no expression to
    /// report the error at.
    pub(crate) fn new_error_at_expr(&self, modules: &[Module], heap: &Heap, error_message: String) -> EvalError {
        let last_frame = self.frames.last().unwrap();
        for instruction in last_frame.expr_stack.iter().rev() {
            if let ExprInstruction::EvalExpr(expression_id) = instruction {
                return EvalError::new_error_at_expr(self, modules, heap, *expression_id, error_message);
            }
        }

        // If here, then the expression stack was empty (it cannot contain
        // just rotate instructions)
        panic!("attempted to construct evaluation error without any expressions to evaluate in frame");
    }
}