Changeset - b69f417a9972
src/collections/string_pool.rs
 
use std::ptr::null_mut;
 
use std::hash::{Hash, Hasher};
 
use std::marker::PhantomData;
 
use std::fmt::{Debug, Display, Result as FmtResult};
 
use crate::common::Formatter;
 

	
 
const SLAB_SIZE: usize = u16::MAX as usize;
 

	
 
#[derive(Clone)]
 
pub struct StringRef<'a> {
 
    data: *const u8,
 
    length: usize,
 
    _phantom: PhantomData<&'a [u8]>,
 
}
 

	
 
// StringRef is immutable, so it is safe to share and send across threads:
 
unsafe impl Sync for StringRef<'_> {}
 
unsafe impl Send for StringRef<'_> {}
 

	
 
impl<'a> StringRef<'a> {
 
    /// `new` constructs a new StringRef whose data is not owned by the
 
    /// `StringPool`, hence cannot have a `'static` lifetime.
 
    pub(crate) fn new(data: &'a [u8]) -> StringRef<'a> {
 
        // This is an internal (compiler) function, so we only debug_assert that the
 
        // string is valid ascii. Most commonly the input will come from the
 
        // code's source file, which is checked for ASCII-ness anyway.
 
        debug_assert!(data.is_ascii());
 
        let length = data.len();
 
        let data = data.as_ptr();
 
        StringRef{ data, length, _phantom: PhantomData }
 
    }
 

	
 
    pub fn as_str(&self) -> &'a str {
 
        unsafe {
 
            let slice = std::slice::from_raw_parts::<'a, u8>(self.data, self.length);
 
            std::str::from_utf8_unchecked(slice)
 
        }
 
    }
 

	
 
    pub fn as_bytes(&self) -> &'a [u8] {
 
        unsafe {
 
            std::slice::from_raw_parts::<'a, u8>(self.data, self.length)
 
        }
 
    }
 
}
 

	
 
impl<'a> Debug for StringRef<'a> {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        f.write_str("StringRef{ value: ")?;
 
        f.write_str(self.as_str())?;
 
        f.write_str(" }")
 
    }
 
}
 

	
 
impl<'a> Display for StringRef<'a> {
 
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
 
        f.write_str(self.as_str())
 
    }
 
}
 

	
 
impl PartialEq for StringRef<'_> {
 
    fn eq(&self, other: &StringRef) -> bool {
 
        self.as_str() == other.as_str()
 
    }
 
}
 

	
 
impl Eq for StringRef<'_> {}
 

	
 
impl Hash for StringRef<'_> {
 
    fn hash<H: Hasher>(&self, state: &mut H) {
 
        state.write(self.as_bytes());
 
    }
 
}
 

	
 
struct StringPoolSlab {
 
    prev: *mut StringPoolSlab,
 
    data: Vec<u8>,
 
    remaining: usize,
 
}
 

	
 
impl StringPoolSlab {
 
    fn new(prev: *mut StringPoolSlab) -> Self {
 
        Self{ prev, data: Vec::with_capacity(SLAB_SIZE), remaining: SLAB_SIZE }
 
    }
 
}
 

	
 
/// StringPool is an ever-growing pool of strings. Strings have a maximum size
 
/// equal to the slab size. The slabs are essentially a linked list to maintain
 
/// pointer-stability of the strings themselves.
 
/// All `StringRef` instances are invalidated when the string pool is dropped.
 
pub(crate) struct StringPool {
 
    last: *mut StringPoolSlab,
 
}
 

	
 
impl StringPool {
 
    pub(crate) fn new() -> Self {
 
        // To have some stability we just turn a box into a raw ptr.
 
        let initial_slab = Box::new(StringPoolSlab::new(null_mut()));
 
        let initial_slab = Box::into_raw(initial_slab);
 
        StringPool{
 
            last: initial_slab,
 
        }
 
    }
 

	
 
    /// Interns a string to the `StringPool`, returning a reference to it. The
 
    /// pointer owned by `StringRef` is `'static` as the `StringPool` doesn't
 
    /// reallocate/deallocate until dropped (which only happens at the end of
 
    /// the program.)
 
    pub(crate) fn intern(&mut self, data: &[u8]) -> StringRef<'static> {
 
        // TODO: Large string allocations, if ever needed.
 
        let data_len = data.len();
 
        assert!(data_len <= SLAB_SIZE, "string is too large for slab");
 
        debug_assert!(std::str::from_utf8(data).is_ok(), "string to intern is not valid UTF-8");
 
        
 
        let mut last = unsafe{&mut *self.last};
 
        if data.len() > last.remaining {
 
            // Doesn't fit: allocate new slab
 
            self.alloc_new_slab();
 
            last = unsafe{&mut *self.last};
 
        }
 

	
 
        // Must fit now: append the bytes to the slab's buffer
 
        debug_assert!(data_len <= last.remaining);
 
        let range_start = last.data.len();
 
        last.data.extend_from_slice(data);
 
        last.remaining -= data_len;
 
        debug_assert_eq!(range_start + data_len, last.data.len());
 

	
 
        unsafe {
 
            let start = last.data.as_ptr().add(range_start);
 
            StringRef{ data: start, length: data_len, _phantom: PhantomData }
 
        }
 
    }
 

	
 
    fn alloc_new_slab(&mut self) {
 
        let new_slab = Box::new(StringPoolSlab::new(self.last));
 
        let new_slab = Box::into_raw(new_slab);
 
        self.last = new_slab;
 
    }
 
}
 

	
 
impl Drop for StringPool {
 
    fn drop(&mut self) {
 
        let mut new_slab = self.last;
 
        while !new_slab.is_null() {
 
            let cur_slab = new_slab;
 
            unsafe {
 
                new_slab = (*cur_slab).prev;
 
                drop(Box::from_raw(cur_slab)); // consume and deallocate
 
            }
 
        }
 
    }
 
}
 

	
 
// The string pool cannot be cloned, and the created `StringRef` instances stay

// allocated until the end of the program, so it is always safe to send. It is

// also `Sync` in the sense that it becomes immutable after compilation, but

// let's not declare that until we actually become a multithreaded compiler.
 
unsafe impl Send for StringPool {}
 

	
 
#[cfg(test)]
 
mod tests {
 
    use super::*;
 

	
 
    #[test]
 
    fn test_string_just_fits() {
 
        let large = "0".repeat(SLAB_SIZE);
 
        let mut pool = StringPool::new();
 
        let interned = pool.intern(large.as_bytes());
 
        assert_eq!(interned.as_str(), large);
 
    }
 

	
 
    #[test]
 
    #[should_panic]
 
    fn test_string_too_large() {
 
        let large = "0".repeat(SLAB_SIZE + 1);
 
        let mut pool = StringPool::new();
 
        let _interned = pool.intern(large.as_bytes());
 
    }
 

	
 
    #[test]
 
    fn test_lots_of_small_allocations() {
 
        const NUM_PER_SLAB: usize = 32;
 
        const NUM_SLABS: usize = 4;
 

	
 
        let to_intern = "0".repeat(SLAB_SIZE / NUM_PER_SLAB);
 
        let mut pool = StringPool::new();
 

	
 
        let mut last_slab = pool.last;
 
        let mut all_refs = Vec::new();
 

	
 
        // Fill up first slab
 
        for _alloc_idx in 0..NUM_PER_SLAB {
 
            let interned = pool.intern(to_intern.as_bytes());
 
            all_refs.push(interned);
 
            assert!(std::ptr::eq(last_slab, pool.last));
 
        }
 

	
 
        for _slab_idx in 0..NUM_SLABS-1 {
 
            for alloc_idx in 0..NUM_PER_SLAB {
 
                let interned = pool.intern(to_intern.as_bytes());
 
                all_refs.push(interned);
 

	
 
                if alloc_idx == 0 {
 
                    // First allocation produces a new slab
 
                    assert!(!std::ptr::eq(last_slab, pool.last));
 
                    last_slab = pool.last;
 
                } else {
 
                    assert!(std::ptr::eq(last_slab, pool.last));
 
                }
 
            }
 
        }
 

	
 
        // All strings are still correct
 
        for string_ref in all_refs {
 
            assert_eq!(string_ref.as_str(), to_intern);
 
        }
 
    }
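
    // A small additional test (a sketch added for illustration): interned
    // strings compare and hash by their contents, not by pointer identity,
    // so they can be used as keys in standard collections.
    #[test]
    fn test_interned_strings_compare_by_content() {
        use std::collections::HashSet;

        let mut pool = StringPool::new();
        let a = pool.intern(b"hello");
        let b = pool.intern(b"hello");

        // Different slab offsets, equal contents.
        assert_eq!(a, b);

        let mut set = HashSet::new();
        set.insert(a);
        assert!(set.contains(&b));
    }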
 
}
 
\ No newline at end of file
src/ffi/mod.rs
 
@@ -43,391 +43,394 @@ impl StoredError {
 
            stored_error.debug_store(error);
 
        })
 
    }
 
    fn bytes_store(&mut self, bytes: &[u8]) {
 
        let _ = self.buf.write_all(bytes);
 
        self.buf.push(Self::NULL_TERMINATOR);
 
    }
 
    fn tl_bytes_store(bytes: &[u8]) {
 
        STORED_ERROR.with(|stored_error| {
 
            let mut stored_error = stored_error.borrow_mut();
 
            stored_error.clear();
 
            stored_error.bytes_store(bytes);
 
        })
 
    }
 
    fn tl_clear() {
 
        STORED_ERROR.with(|stored_error| {
 
            let mut stored_error = stored_error.borrow_mut();
 
            stored_error.clear();
 
        })
 
    }
 
    fn tl_bytes_peek() -> (*const u8, usize) {
 
        STORED_ERROR.with(|stored_error| {
 
            let stored_error = stored_error.borrow();
 
            match stored_error.buf.len() {
 
                0 => (core::ptr::null(), 0), // no error!
 
                n => {
 
                    // stores an error of length n-1 AND a NULL TERMINATOR
 
                    (stored_error.buf.as_ptr(), n - 1)
 
                }
 
            }
 
        })
 
    }
 
}
 
thread_local! {
 
    static STORED_ERROR: RefCell<StoredError> = RefCell::new(StoredError::default());
 
}
 

	
 
pub const RW_OK: c_int = 0;
 
pub const RW_TL_ERR: c_int = -1;
 
pub const RW_WRONG_STATE: c_int = -2;
 
pub const RW_LOCK_POISONED: c_int = -3;
 
pub const RW_CLOSE_FAIL: c_int = -4;
 
pub const RW_BAD_FD: c_int = -5;
 
pub const RW_CONNECT_FAILED: c_int = -6;
 
pub const RW_WOULD_BLOCK: c_int = -7;
 
pub const RW_BAD_SOCKADDR: c_int = -8;
 

	
 
///////////////////// REOWOLF //////////////////////////
 

	
 
/// Returns length (via out pointer) and pointer (via return value) of the last Reowolf error.
 
/// - pointer is NULL iff there was no last error
 
/// - data at pointer is null-terminated

/// - len does NOT include the null terminator
 
/// If len is NULL, it will not be written to.
 
#[no_mangle]
 
pub unsafe extern "C" fn reowolf_error_peek(len: *mut usize) -> *const u8 {
 
    let (err_ptr, err_len) = StoredError::tl_bytes_peek();
 
    if !len.is_null() {
 
        len.write(err_len);
 
    }
 
    err_ptr
 
}
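
// A minimal usage sketch from the Rust side (added for illustration, not part
// of the public API): wraps `reowolf_error_peek` into an owned `String`. The
// helper name is hypothetical.
#[allow(dead_code)]
unsafe fn last_error_to_string() -> Option<String> {
    let mut len = 0usize;
    let ptr = reowolf_error_peek(&mut len);
    if ptr.is_null() {
        return None; // no error stored on this thread
    }
    let bytes = std::slice::from_raw_parts(ptr, len);
    Some(String::from_utf8_lossy(bytes).into_owned())
}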
 

	
 
///////////////////// PROTOCOL DESCRIPTION //////////////////////////
 

	
 
/// Parses the utf8-encoded string slice to initialize a new protocol description object.
 
/// - On success, returns a pointer to the newly allocated protocol description

/// - On failure, stores an error string (see `reowolf_error_peek`) and returns NULL
 
#[no_mangle]
 
pub unsafe extern "C" fn protocol_description_parse(
 
    pdl: *const u8,
 
    pdl_len: usize,
 
) -> *mut Arc<ProtocolDescription> {
 
    StoredError::tl_clear();
 
    match ProtocolDescription::parse(&*slice_from_raw_parts(pdl, pdl_len)) {
 
        Ok(new) => Box::into_raw(Box::new(Arc::new(new))),
 
        Err(err) => {
 
            StoredError::tl_bytes_store(err.as_bytes());
 
            std::ptr::null_mut()
 
        }
 
    }
 
}
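
// Usage sketch (illustrative only; the PDL source below is a placeholder and
// not necessarily valid):
//
//     let pdl = b"...protocol description source...";
//     let pd = unsafe { protocol_description_parse(pdl.as_ptr(), pdl.len()) };
//     if pd.is_null() {
//         // inspect reowolf_error_peek() for the parse error
//     } else {
//         unsafe { protocol_description_destroy(pd) };
//     }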
 

	
 
/// Destroys the given initialized protocol description and frees its resources.
 
#[no_mangle]
 
pub unsafe extern "C" fn protocol_description_destroy(pd: *mut Arc<ProtocolDescription>) {
 
    drop(Box::from_raw(pd))
 
}
 

	
 
/// Given an initialized protocol description, returns a pointer to a clone which can be used and destroyed independently.
 
#[no_mangle]
 
pub unsafe extern "C" fn protocol_description_clone(
 
    pd: &Arc<ProtocolDescription>,
 
) -> *mut Arc<ProtocolDescription> {
 
    Box::into_raw(Box::new(pd.clone()))
 
}
 

	
 
///////////////////// CONNECTOR //////////////////////////
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_new_logging_with_id(
 
    pd: &Arc<ProtocolDescription>,
 
    path_ptr: *const u8,
 
    path_len: usize,
 
    connector_id: ConnectorId,
 
) -> *mut Connector {
 
    StoredError::tl_clear();
 
    let path_bytes = &*slice_from_raw_parts(path_ptr, path_len);
 
    let path_str = match std::str::from_utf8(path_bytes) {
 
        Ok(path_str) => path_str,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            return std::ptr::null_mut();
 
        }
 
    };
 
    match std::fs::File::create(path_str) {
 
        Ok(file) => {
 
            let file_logger = Box::new(FileLogger::new(connector_id, file));
 
            let c = Connector::new(file_logger, pd.clone(), connector_id);
 
            Box::into_raw(Box::new(c))
 
        }
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            std::ptr::null_mut()
 
        }
 
    }
 
}
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_new_with_id(
 
    pd: &Arc<ProtocolDescription>,
 
    connector_id: ConnectorId,
 
) -> *mut Connector {
 
    let c = Connector::new(Box::new(DummyLogger), pd.clone(), connector_id);
 
    Box::into_raw(Box::new(c))
 
}
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_new_logging(
 
    pd: &Arc<ProtocolDescription>,
 
    path_ptr: *const u8,
 
    path_len: usize,
 
) -> *mut Connector {
 
    connector_new_logging_with_id(pd, path_ptr, path_len, Connector::random_id())
 
}
 

	
 
/// Creates a new connector on the heap using the given protocol description as its configuration,

/// returning a pointer to it. The connector gets a randomly generated (internal) connector ID.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_new(pd: &Arc<ProtocolDescription>) -> *mut Connector {
 
    connector_new_with_id(pd, Connector::random_id())
 
}
 

	
 
/// Destroys the connector behind the given pointer, freeing its resources.
 
/// Usable in {setup, communication} states.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_destroy(connector: *mut Connector) {
 
    drop(Box::from_raw(connector))
 
}
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_print_debug(connector: &mut Connector) {
 
    println!("Debug print dump {:#?}", connector);
 
}
 

	
 
/// Given an initialized connector in setup or connecting state,
 
/// - Creates a new directed port pair with logical channel putter->getter,
 
/// - adds the ports to the native component's interface,
 
/// - and returns them using the given out pointers.
 
/// Usable in {setup, communication} states.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_add_port_pair(
 
    connector: &mut Connector,
 
    out_putter: *mut PortId,
 
    out_getter: *mut PortId,
 
) {
 
    let [o, i] = connector.new_port_pair();
 
    if !out_putter.is_null() {
 
        out_putter.write(o);
 
    }
 
    if !out_getter.is_null() {
 
        out_getter.write(i);
 
    }
 
}
 

	
 
/// Given
 
/// - an initialized connector in setup or connecting state,
 
/// - a string slice for the component's identifier in the connector's configured protocol description,
 
/// - a set of ports (represented as a slice; duplicates are ignored) in the native component's interface,
 
/// the connector creates a new (internal) protocol component C, such that the set of native ports are moved to C.
 
/// Usable in {setup, communication} states.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_add_component(
 
    connector: &mut Connector,
 
    module_ptr: *const u8,
 
    module_len: usize,
 
    ident_ptr: *const u8,
 
    ident_len: usize,
 
    ports_ptr: *const PortId,
 
    ports_len: usize,
 
) -> c_int {
 
    StoredError::tl_clear();
 
    match connector.add_component(
 
        &*slice_from_raw_parts(module_ptr, module_len),
 
        &*slice_from_raw_parts(ident_ptr, ident_len),
 
        &*slice_from_raw_parts(ports_ptr, ports_len),
 
    ) {
 
        Ok(()) => RW_OK,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
 

	
 
/// Given
 
/// - an initialized connector in setup or connecting state,
 
/// - a utf-8 encoded socket address,
 
/// - the logical polarity of P,
 
/// - the "physical" polarity in {Active, Passive} of the endpoint through which P's peer will be discovered,
 
/// writes P, a port newly added to the native interface, to the `port` out pointer, and returns an error code.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_add_net_port(
 
    connector: &mut Connector,
 
    port: *mut PortId,
 
    addr: FfiSocketAddr,
 
    port_polarity: Polarity,
 
    endpoint_polarity: EndpointPolarity,
 
) -> c_int {
 
    StoredError::tl_clear();
 
    match connector.new_net_port(port_polarity, addr.into(), endpoint_polarity) {
 
        Ok(p) => {
 
            if !port.is_null() {
 
                port.write(p);
 
            }
 
            RW_OK
 
        }
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
 

	
 
/// Given
 
/// - an initialized connector in setup or connecting state,
 
/// - a utf-8 encoded BIND socket address (i.e., "local"),

/// - a utf-8 encoded CONNECT socket address (i.e., "peer"),
 
/// returns [P, G] via out pointers [putter, getter],
 
/// - where P is a Putter port that sends messages into the socket
 
/// - where G is a Getter port that recvs messages from the socket
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_add_udp_mediator_component(
 
    connector: &mut Connector,
 
    putter: *mut PortId,
 
    getter: *mut PortId,
 
    local_addr: FfiSocketAddr,
 
    peer_addr: FfiSocketAddr,
 
) -> c_int {
 
    StoredError::tl_clear();
 
    match connector.new_udp_mediator_component(local_addr.into(), peer_addr.into()) {
 
        Ok([p, g]) => {
 
            if !putter.is_null() {
 
                putter.write(p);
 
            }
 
            if !getter.is_null() {
 
                getter.write(g);
 
            }
 
            RW_OK
 
        }
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
 

	
 
/// Connects this connector to the distributed system of connectors reachable through its endpoints.
 
/// Usable in setup state, and changes the state to communication.
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_connect(
 
    connector: &mut Connector,
 
    timeout_millis: i64,
 
) -> c_int {
 
    StoredError::tl_clear();
 
    let option_timeout_millis: Option<u64> = TryFrom::try_from(timeout_millis).ok();
 
    let timeout = option_timeout_millis.map(Duration::from_millis);
 
    match connector.connect(timeout) {
 
        Ok(()) => RW_OK,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
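
// Note on the timeout convention (added for clarity): a negative
// `timeout_millis` fails the `i64 -> u64` conversion, so it maps to `None`,
// i.e. "no timeout"; any non-negative value becomes `Some(Duration)`. The same
// convention is used by `connector_sync` below.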
 

	
 
// #[no_mangle]
 
// pub unsafe extern "C" fn connector_put_payload(
 
//     connector: &mut Connector,
 
//     port: PortId,
 
//     payload: *mut Payload,
 
// ) -> c_int {
 
//     match connector.put(port, payload.read()) {
 
//         Ok(()) => 0,
 
//         Err(err) => {
 
//             StoredError::tl_debug_store(&err);
 
//             -1
 
//         }
 
//     }
 
// }
 

	
 
// #[no_mangle]
 
// pub unsafe extern "C" fn connector_put_payload_cloning(
 
//     connector: &mut Connector,
 
//     port: PortId,
 
//     payload: &Payload,
 
// ) -> c_int {
 
//     match connector.put(port, payload.clone()) {
 
//         Ok(()) => 0,
 
//         Err(err) => {
 
//             StoredError::tl_debug_store(&err);
 
//             -1
 
//         }
 
//     }
 
// }
 

	
 
/// Convenience function combining the functionality of

/// "payload_new" and "connector_put_payload".
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_put_bytes(
 
    connector: &mut Connector,
 
    port: PortId,
 
    bytes_ptr: *const u8,
 
    bytes_len: usize,
 
) -> c_int {
 
    StoredError::tl_clear();
 
    let bytes = &*slice_from_raw_parts(bytes_ptr, bytes_len);
 
    match connector.put(port, Payload::from(bytes)) {
 
        Ok(()) => RW_OK,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_get(connector: &mut Connector, port: PortId) -> c_int {
 
    StoredError::tl_clear();
 
    match connector.get(port) {
 
        Ok(()) => RW_OK,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR
 
        }
 
    }
 
}
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_next_batch(connector: &mut Connector) -> isize {
 
    StoredError::tl_clear();
 
    match connector.next_batch() {
 
        Ok(n) => n as isize,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR as isize
 
        }
 
    }
 
}
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_sync(connector: &mut Connector, timeout_millis: i64) -> isize {
 
    StoredError::tl_clear();
 
    let option_timeout_millis: Option<u64> = TryFrom::try_from(timeout_millis).ok();
 
    let timeout = option_timeout_millis.map(Duration::from_millis);
 
    match connector.sync(timeout) {
 
        Ok(n) => n as isize,
 
        Err(err) => {
 
            StoredError::tl_debug_store(&err);
 
            RW_TL_ERR as isize
 
        }
 
    }
 
}
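
// Note (added for clarity): `connector_next_batch` and `connector_sync` reuse
// the negative `RW_*` codes as error returns, so a non-negative value is the
// result of the underlying call, while a negative value means an error was
// stored for `reowolf_error_peek`.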
 

	
 
#[no_mangle]
 
pub unsafe extern "C" fn connector_gotten_bytes(
 
    connector: &mut Connector,
 
    port: PortId,
 
    out_len: *mut usize,
 
) -> *const u8 {
 
    StoredError::tl_clear();
 
    match connector.gotten(port) {
 
        Ok(payload_borrow) => {
 
            let slice = payload_borrow.as_slice();
 
            if !out_len.is_null() {
 
                out_len.write(slice.len());
src/protocol/eval/error.rs
 
new file 100644
 
use std::fmt;
 

	
 
use crate::protocol::{
 
    ast::*,
 
    Module,
 
    input_source::{ErrorStatement, StatementKind}
 
};
 
use super::executor::*;
 

	
 
/// Represents a stack frame recorded in an error
 
#[derive(Debug)]
 
pub struct EvalFrame {
 
    pub line: u32,
 
    pub module_name: String,
 
    pub procedure: String, // function or component
 
    pub is_func: bool,
 
}
 

	
 
impl fmt::Display for EvalFrame {
 
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 
        let func_or_comp = if self.is_func {
 
            "function "
 
        } else {
 
            "component"
 
        };
 

	
 
        if self.module_name.is_empty() {
 
            write!(f, "{} {}:{}", func_or_comp, &self.procedure, self.line)
 
        } else {
 
            write!(f, "{} {}:{}:{}", func_or_comp, &self.module_name, &self.procedure, self.line)
 
        }
 
    }
 
}
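
// Example rendering (illustrative): a frame in function `fib` of module `math`
// at line 12 displays as "function  math:fib:12"; a frame without a module
// name displays as "function  fib:12".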
 

	
 
/// Represents an error that occurred during evaluation. Contains error
 
/// statements just like in parsing errors. Additionally may display the current
 
/// execution state.
 
#[derive(Debug)]
 
pub struct EvalError {
 
    pub(crate) statements: Vec<ErrorStatement>,
 
    pub(crate) frames: Vec<EvalFrame>,
 
}
 

	
 
impl EvalError {
 
    pub(crate) fn new_error_at_expr(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, msg: String) -> EvalError {
 
        // Create frames
 
        debug_assert!(!prompt.frames.is_empty());
 
        let mut frames = Vec::with_capacity(prompt.frames.len());
 
        let mut last_module_source = &modules[0].source;
 
        for frame in prompt.frames.iter() {
 
            let definition = &heap[frame.definition];
 
            let statement = &heap[frame.position];
 
            let statement_span = statement.span();
 

	
 
            let (root_id, procedure, is_func) = match definition {
 
                Definition::Function(def) => {
 
                    (def.defined_in, def.identifier.value.as_str().to_string(), true)
 
                },
 
                Definition::Component(def) => {
 
                    (def.defined_in, def.identifier.value.as_str().to_string(), false)
 
                },
 
                _ => unreachable!("construct stack frame with definition pointing to data type")
 
            };
 

	
 
            // Lookup module name, if it has one
 
            let module = modules.iter().find(|m| m.root_id == root_id).unwrap();
 
            let module_name = if let Some(name) = &module.name {
 
                name.as_str().to_string()
 
            } else {
 
                String::new()
 
            };
 

	
 
            last_module_source = &module.source;
 
            frames.push(EvalFrame{
 
                line: statement_span.begin.line,
 
                module_name,
 
                procedure,
 
                is_func
 
            });
 
        }
 

	
 
        let expr = &heap[expr_id];
 
        let statements = vec![
 
            ErrorStatement::from_source_at_span(StatementKind::Error, last_module_source, expr.span(), msg)
 
        ];
 

	
 
        EvalError{ statements, frames }
 
    }
 
}
 

	
 
impl fmt::Display for EvalError {
 
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 
        // Display error statement(s)
 
        self.statements[0].fmt(f)?;
 
        for statement in self.statements.iter().skip(1) {
 
            writeln!(f)?;
 
            statement.fmt(f)?;
 
        }
 

	
 
        // Display stack trace
 
        writeln!(f)?;
 
        writeln!(f, " +-  Stack trace:")?;
 
        for frame in self.frames.iter().rev() {
 
            write!(f, " | ")?;
 
            frame.fmt(f)?;
 
            writeln!(f)?;
 
        }
 

	
 
        Ok(())
 
    }
 
}
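
// Illustrative output shape (added for clarity): the error statement(s) are
// printed first, followed by the stack trace with the innermost frame on top,
// e.g.:
//
//     <error statement rendered by ErrorStatement>
//      +-  Stack trace:
//      | function  math:fib:12
//      | component main:3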
 
\ No newline at end of file
src/protocol/eval/executor.rs
 

	
 
use std::collections::VecDeque;
 

	
 
use super::value::*;
 
use super::store::*;
 
use super::error::*;
 
use crate::protocol::*;
 
use crate::protocol::ast::*;
 

	
 
macro_rules! debug_enabled { () => { true }; }
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(true, "exec", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(true, "exec", $format, $($args),*);
 
    };
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) enum ExprInstruction {
 
    EvalExpr(ExpressionId),
 
    PushValToFront,
 
}
 

	
 
#[derive(Debug, Clone)]
 
pub(crate) struct Frame {
 
    definition: DefinitionId,
 
    position: StatementId,
 
    expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
    pub(crate) definition: DefinitionId,
 
    pub(crate) position: StatementId,
 
    pub(crate) expr_stack: VecDeque<ExprInstruction>, // hack for expression evaluation, evaluated by popping from back
 
    pub(crate) expr_values: VecDeque<Value>, // hack for expression results, evaluated by popping from front/back
 
}
 

	
 
impl Frame {
 
    /// Creates a new execution frame. Does not modify the stack in any way.
 
    pub fn new(heap: &Heap, definition_id: DefinitionId) -> Self {
 
        let definition = &heap[definition_id];
 
        let first_statement = match definition {
 
            Definition::Component(definition) => definition.body,
 
            Definition::Function(definition) => definition.body,
 
            _ => unreachable!("initializing frame with {:?} instead of a function/component", definition),
 
        };
 

	
 
        Frame{
 
            definition: definition_id,
 
            position: first_statement.upcast(),
 
            expr_stack: VecDeque::with_capacity(128),
 
            expr_values: VecDeque::with_capacity(128),
 
        }
 
    }
 

	
 
    /// Prepares a single expression for execution. This involves walking the
 
    /// expression tree and putting its nodes in the `expr_stack` such that
 
    /// continuously popping from its back will evaluate the expression. The
 
    /// results of each expression will be stored by pushing onto `expr_values`.
 
    pub fn prepare_single_expression(&mut self, heap: &Heap, expr_id: ExpressionId) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear(); // May not be empty if last expression result(s) were discarded
 

	
 
        self.serialize_expression(heap, expr_id);
 
    }
 

	
 
    /// Prepares multiple expressions for execution (i.e. evaluating all
 
    /// function arguments or all elements of an array/union literal). Per
 
    /// expression this works the same as `prepare_single_expression`. However
 
    /// after each expression is evaluated we insert a `PushValToFront`
 
    /// instruction so the results end up at the front of `expr_values` in argument order.
 
    pub fn prepare_multiple_expressions(&mut self, heap: &Heap, expr_ids: &[ExpressionId]) {
 
        debug_assert!(self.expr_stack.is_empty());
 
        self.expr_values.clear();
 

	
 
        for expr_id in expr_ids {
 
            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
            self.serialize_expression(heap, *expr_id);
 
        }
 
    }
 

	
 
    /// Performs depth-first serialization of expression tree. Let's not care
 
    /// about performance for a temporary runtime implementation
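    /// (Illustration added for clarity: for a binary expression `a + b` the
    /// stack ends up as `[EvalExpr(a + b), EvalExpr(a), EvalExpr(b)]` with the
    /// back rightmost, so popping from the back evaluates `b`, then `a`, and
    /// finally applies `+`; the operand results are then popped from the back
    /// of `expr_values`, left-hand side first.)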
 
    fn serialize_expression(&mut self, heap: &Heap, id: ExpressionId) {
 
        self.expr_stack.push_back(ExprInstruction::EvalExpr(id));
 

	
 
        match &heap[id] {
 
            Expression::Assignment(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Binding(_expr) => {
 
                todo!("implement binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                self.serialize_expression(heap, expr.test);
 
            },
 
            Expression::Binary(expr) => {
 
                self.serialize_expression(heap, expr.left);
 
                self.serialize_expression(heap, expr.right);
 
            },
 
            Expression::Unary(expr) => {
 
                self.serialize_expression(heap, expr.expression);
 
            },
 
            Expression::Indexing(expr) => {
 
                self.serialize_expression(heap, expr.index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Slicing(expr) => {
 
                self.serialize_expression(heap, expr.from_index);
 
                self.serialize_expression(heap, expr.to_index);
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Select(expr) => {
 
                self.serialize_expression(heap, expr.subject);
 
            },
 
            Expression::Literal(expr) => {
 
                // Here we only care about literals that have subexpressions
 
                match &expr.value {
 
                    Literal::Null | Literal::True | Literal::False |
 
                    Literal::Character(_) | Literal::String(_) |
 
                    Literal::Integer(_) | Literal::Enum(_) => {
 
                        // No subexpressions
 
                    },
 
                    Literal::Struct(literal) => {
 
                        for field in &literal.fields {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, field.value);
 
                        }
 
                    },
 
                    Literal::Union(literal) => {
 
                        for value_expr_id in &literal.values {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    },
 
                    Literal::Array(value_expr_ids) => {
 
                        for value_expr_id in value_expr_ids {
 
                            self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                            self.serialize_expression(heap, *value_expr_id);
 
                        }
 
                    }
 
                }
 
            },
 
            Expression::Call(expr) => {
 
                for arg_expr_id in &expr.arguments {
 
                    self.expr_stack.push_back(ExprInstruction::PushValToFront);
 
                    self.serialize_expression(heap, *arg_expr_id);
 
                }
 
            },
 
            Expression::Variable(_expr) => {
 
                // No subexpressions
 
            }
 
        }
 
    }
 
}
 

	
 
type EvalResult = Result<EvalContinuation, ()>;
 
type EvalResult = Result<EvalContinuation, EvalError>;
 

	
 
pub enum EvalContinuation {
 
    Stepping,
 
    Inconsistent,
 
    Terminal,
 
    SyncBlockStart,
 
    SyncBlockEnd,
 
    NewComponent(DefinitionId, ValueGroup),
 
    BlockFires(Value),
 
    BlockGet(Value),
 
    Put(Value, Value),
 
}
 

	
 
// Note: cloning is fine: if we clone all of the values and the heap regions, then

// we end up with valid "pointers" into the cloned heap regions.
 
#[derive(Debug, Clone)]
 
pub struct Prompt {
 
    pub(crate) frames: Vec<Frame>,
 
    pub(crate) store: Store,
 
}
 

	
 
impl Prompt {
 
    pub fn new(heap: &Heap, def: DefinitionId, args: ValueGroup) -> Self {
 
        let mut prompt = Self{
 
            frames: Vec::new(),
 
            store: Store::new(),
 
        };
 

	
 
        prompt.frames.push(Frame::new(heap, def));
 
        args.into_store(&mut prompt.store);
 

	
 
        prompt
 
    }
 

	
 
    pub(crate) fn step(&mut self, heap: &Heap, ctx: &mut EvalContext) -> EvalResult {
 
    pub(crate) fn step(&mut self, heap: &Heap, modules: &[Module], ctx: &mut EvalContext) -> EvalResult {
 
        // Helper function to transfer multiple values from the expression value
 
        // array into a heap region (e.g. constructing arrays or structs).
 
        fn transfer_expression_values_front_into_heap(cur_frame: &mut Frame, store: &mut Store, num_values: usize) -> HeapPos {
 
            let heap_pos = store.alloc_heap();
 

	
 
            // Do the ownership transformation in a separate loop first (to appease the borrow checker)
 
            for val_idx in 0..num_values {
 
                cur_frame.expr_values[val_idx] = store.read_take_ownership(cur_frame.expr_values[val_idx].clone());
 
            }
 

	
 
            // And now transfer to the heap region
 
            let values = &mut store.heap_regions[heap_pos as usize].values;
 
            debug_assert!(values.is_empty());
 
            values.reserve(num_values);
 
            for _ in 0..num_values {
 
                values.push(cur_frame.expr_values.pop_front().unwrap());
 
            }
 

	
 
            heap_pos
 
        }
 

	
 
        // Helper functions to check whether an index into an array is out of bounds.
 
        fn array_inclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx >= array_len as i64;
 
        }
 

	
 
        fn array_exclusive_index_is_invalid(store: &Store, array_heap_pos: u32, idx: i64) -> bool {
 
            let array_len = store.heap_regions[array_heap_pos as usize].values.len();
 
            return idx < 0 || idx > array_len as i64;
 
        }
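
        // Note (added for clarity): the "inclusive" check rejects `idx == len`
        // and guards element accesses, while the "exclusive" check allows
        // `idx == len` and guards the end bound of a slice.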
 

	
 
        fn construct_array_error(prompt: &Prompt, modules: &[Module], heap: &Heap, expr_id: ExpressionId, heap_pos: u32, idx: i64) -> EvalError {
 
            let array_len = prompt.store.heap_regions[heap_pos as usize].values.len();
 
            return EvalError::new_error_at_expr(
 
                prompt, modules, heap, expr_id,
 
                format!("index {} is out of bounds: array length is {}", idx, array_len)
 
            )
 
        }
 

	
 
        // Checking if we're at the end of execution
 
        let cur_frame = self.frames.last_mut().unwrap();
 
        if cur_frame.position.is_invalid() {
 
            if heap[cur_frame.definition].is_function() {
 
                todo!("End of function without return, return an evaluation error");
 
            }
 
            return Ok(EvalContinuation::Terminal);
 
        }
 

	
 
        debug_log!("Taking step in '{}'", heap[cur_frame.definition].identifier().value.as_str());
 

	
 
        // Execute all pending expressions
 
        while !cur_frame.expr_stack.is_empty() {
 
            let next = cur_frame.expr_stack.pop_back().unwrap();
 
            debug_log!("Expr stack: {:?}", next);
 
            match next {
 
                ExprInstruction::PushValToFront => {
 
                    cur_frame.expr_values.rotate_right(1);
 
                },
 
                ExprInstruction::EvalExpr(expr_id) => {
 
                    let expr = &heap[expr_id];
 
                    match expr {
 
                        Expression::Assignment(expr) => {
 
                            let to = cur_frame.expr_values.pop_back().unwrap().as_ref();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            // Note: although not pretty, the assignment operator takes ownership
 
                            // of the right-hand side value when possible. So we do not drop the
 
                            // rhs's optionally owned heap data.
 
                            let rhs = self.store.read_take_ownership(rhs);
 
                            apply_assignment_operator(&mut self.store, to, expr.operation, rhs);
 
                        },
 
                        Expression::Binding(_expr) => {
 
                            todo!("Binding expression");
 
                        },
 
                        Expression::Conditional(expr) => {
 
                            // Evaluate testing expression, then extend the
 
                            // expression stack with the appropriate expression
 
                            let test_result = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                            if test_result {
 
                                cur_frame.serialize_expression(heap, expr.true_expression);
 
                            } else {
 
                                cur_frame.serialize_expression(heap, expr.false_expression);
 
                            }
 
                        },
 
                        Expression::Binary(expr) => {
 
                            let lhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let rhs = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_binary_operator(&mut self.store, &lhs, expr.operation, &rhs);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(lhs.get_heap_pos());
 
                            self.store.drop_value(rhs.get_heap_pos());
 
                        },
 
                        Expression::Unary(expr) => {
 
                            let val = cur_frame.expr_values.pop_back().unwrap();
 
                            let result = apply_unary_operator(&mut self.store, expr.operation, &val);
 
                            cur_frame.expr_values.push_back(result);
 
                            self.store.drop_value(val.get_heap_pos());
 
                        },
 
                        Expression::Indexing(expr) => {
 
                            // TODO: Out of bounds checking
 
                            // Evaluate index. Never heap allocated so we do
 
                            // not have to drop it.
 
                            let index = cur_frame.expr_values.pop_back().unwrap();
 
                            let index = self.store.maybe_read_ref(&index);
 

	
 
                            debug_assert!(index.is_integer());
 
                            let index = if index.is_signed_integer() {
 
                                index.as_signed_integer() as u32
 
                                index.as_signed_integer() as i64
 
                            } else {
 
                                index.as_unsigned_integer() as u32
 
                                index.as_unsigned_integer() as i64
 
                            };
 

	
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 

	
 
                            let (deallocate_heap_pos, value_to_push) = match subject {
 
                            let (deallocate_heap_pos, value_to_push, subject_heap_pos) = match subject {
 
                                Value::Ref(value_ref) => {
 
                                    // Our expression stack value is a reference to something that
 
                                    // exists in the normal stack/heap. We don't want to deallocate
 
                                    // this thing. Rather we want to return a reference to it.
 
                                    let subject = self.store.read_ref(value_ref);
 
                                    let subject_heap_pos = subject.as_array();
 

	
 
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, index)))
 
                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
 
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
 
                                    }
 

	
 
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, index as u32)), subject_heap_pos)
 
                                },
 
                                _ => {
 
                                    // Our value lives on the expression stack, hence we need to
 
                                    // clone whatever we're referring to. Then drop the subject.
 
                                    let subject_heap_pos = subject.as_array();
 
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, index));
 
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
 

	
 
                                    if array_inclusive_index_is_invalid(&self.store, subject_heap_pos, index) {
 
                                        return Err(construct_array_error(self, modules, heap, expr_id, subject_heap_pos, index));
 
                                    }
 

	
 
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, index as u32));
 
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed), subject_heap_pos)
 
                                },
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value_to_push);
 
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Slicing(expr) => {
 
                            // TODO: Out of bounds checking
 
                            todo!("implement slicing")
 
                            // Evaluate indices
 
                            let from_index = cur_frame.expr_values.pop_back().unwrap();
 
                            let from_index = self.store.maybe_read_ref(&from_index);
 
                            let to_index = cur_frame.expr_values.pop_back().unwrap();
 
                            let to_index = self.store.maybe_read_ref(&to_index);
 

	
 
                            debug_assert!(from_index.is_integer() && to_index.is_integer());
 
                            let from_index = if from_index.is_signed_integer() {
 
                                from_index.as_signed_integer()
 
                            } else {
 
                                from_index.as_unsigned_integer() as i64
 
                            };
 
                            let to_index = if to_index.is_signed_integer() {
 
                                to_index.as_signed_integer()
 
                            } else {
 
                                to_index.as_unsigned_integer() as i64
 
                            };
 

	
 
                            // Dereference subject if needed
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
                            let deref_subject = self.store.maybe_read_ref(&subject);
 

	
 
                            // Slicing needs to produce a copy anyway (with the
 
                            // current evaluator implementation)
 
                            let array_heap_pos = deref_subject.as_array();
 
                            if array_inclusive_index_is_invalid(&self.store, array_heap_pos, from_index) {
 
                                return Err(construct_array_error(self, modules, heap, expr.from_index, array_heap_pos, from_index));
 
                            }
 
                            if array_exclusive_index_is_invalid(&self.store, array_heap_pos, to_index) {
 
                                return Err(construct_array_error(self, modules, heap, expr.to_index, array_heap_pos, to_index));
 
                            }
 

	
 
                            // Again: would love to push directly, but rust...
 
                            let new_heap_pos = self.store.alloc_heap();
 
                            debug_assert!(self.store.heap_regions[new_heap_pos as usize].values.is_empty());
 
                            if to_index > from_index {
 
                                let from_index = from_index as usize;
 
                                let to_index = to_index as usize;
 
                                let mut values = Vec::with_capacity(to_index - from_index);
 
                                for idx in from_index..to_index {
 
                                    let value = self.store.heap_regions[array_heap_pos as usize].values[idx].clone();
 
                                    values.push(self.store.clone_value(value));
 
                                }
 

	
 
                                self.store.heap_regions[new_heap_pos as usize].values = values;
 

	
 
                            } // else: empty range
 

	
 
                            cur_frame.expr_values.push_back(Value::Array(new_heap_pos));
 

	
 
                            // Dropping the original subject, because we don't
 
                            // want to drop something on the stack
 
                            self.store.drop_value(subject.get_heap_pos());
 
                        },
 
                        Expression::Select(expr) => {
 
                            let subject = cur_frame.expr_values.pop_back().unwrap();
 
                            let field_idx = expr.field.as_symbolic().field_idx as u32;
 
                            // Note: same as above: clone if value lives on expr stack, simply
 
                            // refer to it if it already lives on the stack/heap.
 
                            let (deallocate_heap_pos, value_to_push) = match subject {
 
                                Value::Ref(value_ref) => {
 
                                    let subject = self.store.read_ref(value_ref);
 
                                    let subject_heap_pos = subject.as_struct();
 

	
 
                                    (None, Value::Ref(ValueId::Heap(subject_heap_pos, field_idx)))
 
                                },
 
                                _ => {
 
                                    let subject_heap_pos = subject.as_struct();
 
                                    let subject_indexed = Value::Ref(ValueId::Heap(subject_heap_pos, field_idx));
 
                                    (Some(subject_heap_pos), self.store.clone_value(subject_indexed))
 
                                },
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value_to_push);
 
                            self.store.drop_value(deallocate_heap_pos);
 
                        },
 
                        Expression::Literal(expr) => {
 
                            let value = match &expr.value {
 
                                Literal::Null => Value::Null,
 
                                Literal::True => Value::Bool(true),
 
                                Literal::False => Value::Bool(false),
 
                                Literal::Character(lit_value) => Value::Char(*lit_value),
 
                                Literal::String(lit_value) => {
 
                                    let heap_pos = self.store.alloc_heap();
 
                                    let values = &mut self.store.heap_regions[heap_pos as usize].values;
 
                                    let value = lit_value.as_str();
 
                                    debug_assert!(values.is_empty());
 
                                    values.reserve(value.len());
 
                                    for character in value.as_bytes() {
 
                                        debug_assert!(character.is_ascii());
 
                                        values.push(Value::Char(*character as char));
 
                                    }
 
                                    Value::String(heap_pos)
 
                                }
 
                                Literal::Integer(lit_value) => {
 
                                    use ConcreteTypePart as CTP;
 
                                    debug_assert_eq!(expr.concrete_type.parts.len(), 1);
 
                                    match expr.concrete_type.parts[0] {
 
                                        CTP::UInt8  => Value::UInt8(lit_value.unsigned_value as u8),
 
                                        CTP::UInt16 => Value::UInt16(lit_value.unsigned_value as u16),
 
                                        CTP::UInt32 => Value::UInt32(lit_value.unsigned_value as u32),
 
                                        CTP::UInt64 => Value::UInt64(lit_value.unsigned_value as u64),
 
                                        CTP::SInt8  => Value::SInt8(lit_value.unsigned_value as i8),
 
                                        CTP::SInt16 => Value::SInt16(lit_value.unsigned_value as i16),
 
                                        CTP::SInt32 => Value::SInt32(lit_value.unsigned_value as i32),
 
                                        CTP::SInt64 => Value::SInt64(lit_value.unsigned_value as i64),
 
                                        _ => unreachable!(),
 
                                        _ => unreachable!("got concrete type {:?} for integer literal at expr {:?}", &expr.concrete_type, expr_id),
 
                                    }
 
                                }
 
                                Literal::Struct(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.fields.len()
 
                                    );
 
                                    Value::Struct(heap_pos)
 
                                }
 
                                Literal::Enum(lit_value) => {
 
                                    Value::Enum(lit_value.variant_idx as i64)
 
                                }
 
                                Literal::Union(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.values.len()
 
                                    );
 
                                    Value::Union(lit_value.variant_idx as i64, heap_pos)
 
                                }
 
                                Literal::Array(lit_value) => {
 
                                    let heap_pos = transfer_expression_values_front_into_heap(
 
                                        cur_frame, &mut self.store, lit_value.len()
 
                                    );
 
                                    Value::Array(heap_pos)
 
                                }
 
                            };
 

	
 
                            cur_frame.expr_values.push_back(value);
 
                        },
 
                        Expression::Call(expr) => {
 
                            // Push a new frame. Note that all expressions have
 
                            // been pushed to the front, so they're in the order
 
                            // of the definition.
 
                            let num_args = expr.arguments.len();
 

	
 
                            // Determine stack boundaries
 
                            let cur_stack_boundary = self.store.cur_stack_boundary;
 
                            let new_stack_boundary = self.store.stack.len();
 

	
 
                            // Push new boundary and function arguments for new frame
 
                            self.store.stack.push(Value::PrevStackBoundary(cur_stack_boundary as isize));
 
                            for _ in 0..num_args {
 
                                let argument = self.store.read_take_ownership(cur_frame.expr_values.pop_front().unwrap());
 
                                self.store.stack.push(argument);
 
                            }
 

	
 
                            // Push the new frame
 
                            self.frames.push(Frame::new(heap, expr.definition));
 
                            self.store.cur_stack_boundary = new_stack_boundary;
 

	
 
                            // To simplify the logic a little bit we will now
 
                            // return and ask our caller to call us again
 
                            return Ok(EvalContinuation::Stepping);
 
                        },
 
                        Expression::Variable(expr) => {
 
                            let variable = &heap[expr.declaration.unwrap()];
 
                            cur_frame.expr_values.push_back(Value::Ref(ValueId::Stack(variable.unique_id_in_scope as StackPos)));
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        debug_log!("Frame [{:?}] at {:?}, stack size = {}", cur_frame.definition, cur_frame.position, self.store.stack.len());
 
        if debug_enabled!() {
 
            debug_log!("Stack:");
 
            for (stack_idx, stack_val) in self.store.stack.iter().enumerate() {
 
                debug_log!("  [{:03}] {:?}", stack_idx, stack_val);
 
            }
 

	
 
            debug_log!("Heap:");
 
            for (heap_idx, heap_region) in self.store.heap_regions.iter().enumerate() {
 
                let is_free = self.store.free_regions.iter().any(|idx| *idx as usize == heap_idx);
 
                debug_log!("  [{:03}] in_use: {}, len: {}, vals: {:?}", heap_idx, !is_free, heap_region.values.len(), &heap_region.values);
 
            }
 
        }
 
        // No (more) expressions to evaluate. So evaluate the statement (which may

        // depend on the result of the last evaluated expression(s))
 
        let stmt = &heap[cur_frame.position];
 
        let return_value = match stmt {
 
            Statement::Block(stmt) => {
 
                // Reserve space on stack, but also make sure excess stack space
 
                // is cleared
 
                self.store.clear_stack(stmt.first_unique_id_in_scope as usize);
 
                self.store.reserve_stack(stmt.next_unique_id_in_scope as usize);
 
                cur_frame.position = stmt.statements[0];
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndBlock(stmt) => {
 
                let block = &heap[stmt.start_block];
 
                self.store.clear_stack(block.first_unique_id_in_scope as usize);
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Local(stmt) => {
 
                match stmt {
 
                    LocalStatement::Memory(stmt) => {
 
                        let variable = &heap[stmt.variable];
 
                        self.store.write(ValueId::Stack(variable.unique_id_in_scope as u32), Value::Unassigned);
 

	
 
                        cur_frame.position = stmt.next;
 
                    },
 
                    LocalStatement::Channel(stmt) => {
 
                        let [from_value, to_value] = ctx.new_channel();
 
                        self.store.write(ValueId::Stack(heap[stmt.from].unique_id_in_scope as u32), from_value);
 
                        self.store.write(ValueId::Stack(heap[stmt.to].unique_id_in_scope as u32), to_value);
 

	
 
                        cur_frame.position = stmt.next;
 
                    }
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Labeled(stmt) => {
 
                cur_frame.position = stmt.body;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::If(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for if statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.true_body.upcast();
 
                } else if let Some(false_body) = stmt.false_body {
 
                    cur_frame.position = false_body.upcast();
 
                } else {
 
                    // Not true, and no false body
 
                    cur_frame.position = stmt.end_if.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndIf(stmt) => {
 
                cur_frame.position = stmt.next;
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::While(stmt) => {
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for while statement");
 
                let test_value = cur_frame.expr_values.pop_back().unwrap().as_bool();
 
                if test_value {
 
                    cur_frame.position = stmt.body.upcast();
 
                } else {
 
                    cur_frame.position = stmt.end_while.upcast();
 
                }
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::EndWhile(stmt) => {
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Break(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Continue(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::Synchronous(stmt) => {
 
                cur_frame.position = stmt.body.upcast();
 

	
 
                Ok(EvalContinuation::SyncBlockStart)
 
            },
 
            Statement::EndSynchronous(stmt) => {
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::SyncBlockEnd)
 
            },
 
            Statement::Return(stmt) => {
 
                debug_assert!(heap[cur_frame.definition].is_function());
 
                debug_assert_eq!(cur_frame.expr_values.len(), 1, "expected one expr value for return statement");
 

	
 
                // The preceding frame has executed a call, so it is expecting the
 
                // return expression on its expression value stack. Note that
 
                // we may be returning a reference to something on our stack,
 
                // so we need to read that value and clone it.
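                // (For example, `return some_local;` may yield a Value::Ref into
                // this frame's stack section, which is cleared below, hence the
                // read and clone.)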
 
                let return_value = cur_frame.expr_values.pop_back().unwrap();
 
                let return_value = match return_value {
 
                    Value::Ref(value_id) => self.store.read_copy(value_id),
 
                    _ => return_value,
 
                };
 

	
 
                // Pre-emptively pop our stack frame
 
                self.frames.pop();
 

	
 
                // Clean up our section of the stack
 
                self.store.clear_stack(0);
 
                let prev_stack_idx = self.store.stack.pop().unwrap().as_stack_boundary();
 

	
 
                // TODO: Temporary hack for testing, remove at some point
 
                if self.frames.is_empty() {
 
                    debug_assert!(prev_stack_idx == -1);
 
                    debug_assert!(self.store.stack.len() == 0);
 
                    self.store.stack.push(return_value);
 
                    return Ok(EvalContinuation::Terminal);
 
                }
 

	
 
                debug_assert!(prev_stack_idx >= 0);
 
                // Return to original state of stack frame
 
                self.store.cur_stack_boundary = prev_stack_idx as usize;
 
                let cur_frame = self.frames.last_mut().unwrap();
 
                cur_frame.expr_values.push_back(return_value);
 

	
 
                // We just returned to the previous frame, which might be in
 
                // the middle of evaluating expressions for a particular
 
                // statement. So we don't want to enter the code below.
 
                return Ok(EvalContinuation::Stepping);
 
            },
 
            Statement::Goto(stmt) => {
 
                cur_frame.position = stmt.target.unwrap().upcast();
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
            Statement::New(stmt) => {
 
                let call_expr = &heap[stmt.expression];
 
                debug_assert!(heap[call_expr.definition].is_component());
 
                debug_assert_eq!(
 
                    cur_frame.expr_values.len(), heap[call_expr.definition].parameters().len(),
 
                    "mismatch in expr stack size and number of arguments for new statement"
 
                );
 

	
 
                // Note that, due to the way expression values are evaluated, the
 
                // arguments end up in reverse order on the stack.
 
                // TODO: Revise this code; kept as-is to stay compatible with the current runtime
 
                let mut args = Vec::new();
 
                while let Some(value) = cur_frame.expr_values.pop_front() {
 
                    args.push(value);
 
                }
 

	
 
                // Construct argument group, thereby copying heap regions
 
                let argument_group = ValueGroup::from_store(&self.store, &args);
 

	
 
                // Clear any heap regions
 
                for arg in &args {
 
                    self.store.drop_value(arg.get_heap_pos());
 
                }
 

	
 
                cur_frame.position = stmt.next;
 

	
 
                todo!("Make sure this is handled correctly, transfer 'heap' values to another Prompt");
 
                Ok(EvalContinuation::NewComponent(call_expr.definition, argument_group))
 
            },
 
            Statement::Expression(stmt) => {
 
                // The expression has just been completely evaluated. Some
 
                // values might have remained on the expression value stack.
 
                cur_frame.expr_values.clear();
 
                cur_frame.position = stmt.next;
 

	
 
                Ok(EvalContinuation::Stepping)
 
            },
 
        };
 

	
 
        assert!(
 
            cur_frame.expr_values.is_empty(),
 
            "This is a debugging assertion that will fail if you perform expressions without \
 
            assigning to anything. This should be completely valid, and this assertions should be \
 
            assigning to anything. This should be completely valid, and this assertion should be \
 
            replaced by something that clears the expression values if needed, but I'll keep this \
 
            in for now for debugging purposes."
 
        );
 

	
 
        // If the next statement requires evaluating expressions then we push
 
        // these onto the expression stack. This way we will evaluate this
 
        // stack in the next loop, then evaluate the statement using the result
 
        // from the expression evaluation.
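        // For example, when the next statement is `if (a < b) { ... }` the test
        // expression `a < b` is prepared here and fully evaluated first; only
        // then is the If statement itself handled by the match above.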
 
        if !cur_frame.position.is_invalid() {
 
            let stmt = &heap[cur_frame.position];
 

	
 
            match stmt {
 
                Statement::If(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::While(stmt) => cur_frame.prepare_single_expression(heap, stmt.test),
 
                Statement::Return(stmt) => {
 
                    debug_assert_eq!(stmt.expressions.len(), 1); // TODO: @ReturnValues
 
                    cur_frame.prepare_single_expression(heap, stmt.expressions[0]);
 
                },
 
                Statement::New(stmt) => {
 
                    // Note that we will end up not evaluating the call itself.
 
                    // Rather we will evaluate its expressions and then
 
                    // instantiate the component upon reaching the "new" stmt.
 
                    let call_expr = &heap[stmt.expression];
 
                    cur_frame.prepare_multiple_expressions(heap, &call_expr.arguments);
 
                },
 
                Statement::Expression(stmt) => {
 
                    cur_frame.prepare_single_expression(heap, stmt.expression);
 
                }
 
                _ => {},
 
            }
 
        }
 

	
 
        return_value
 
    }
 
}
 
\ No newline at end of file
src/protocol/eval/mod.rs
Show inline comments
 
/// eval
 
///
 
/// Evaluator of the generated AST. Note that we use some misappropriated terms
 
/// to describe where values live and what they do. This is a temporary
 
/// implementation of an evaluator until some kind of appropriate bytecode or
 
/// machine code is generated.
 
///
 
/// Code is always executed within a "frame". For Reowolf the first frame is
 
/// usually an executed component. All subsequent frames are function calls.
 
/// Simple values live on the "stack". Each variable/parameter has a place on
 
/// the stack where its value is stored. If the value is not a primitive, then
 
/// its value will be stored in the "heap". Expressions are treated differently
 
/// and use a separate "stack" for their evaluation.
 
///
 
/// Since this is a value-based language, most values are copied. One has to be
 
/// careful with values that reside in the "heap" and make sure that copies are
 
/// properly removed from the heap.
 
///
 
/// Just to reiterate: this is a temporary wasteful implementation. A proper
 
/// implementation would fully fill out the type table with alignment/size/
 
/// offset information and lay out bytecode.
 

	
 
pub(crate) mod value;
 
pub(crate) mod store;
 
pub(crate) mod executor;
 
pub(crate) mod error;
 

	
 
pub use error::EvalError;
 
pub use value::{Value, ValueGroup};
 
pub(crate) use store::{Store};
 
pub use executor::{EvalContinuation, Prompt};
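
// Below is a minimal, self-contained sketch of the model described in the module
// comment above (illustrative types only; the real executor walks the AST using
// a Store and typed Values): a call reserves stack slots for the callee, and a
// return pops that stack section and hands the result back through the caller's
// expression-value queue.
#[cfg(test)]
mod model_sketch {
    struct Frame { base: usize, expr_values: Vec<i64> }

    #[test]
    fn call_and_return() {
        let mut stack: Vec<i64> = Vec::new();
        let mut frames = vec![Frame{ base: 0, expr_values: Vec::new() }];

        // "Call": push a frame and give the single argument a stack slot.
        frames.push(Frame{ base: stack.len(), expr_values: Vec::new() });
        stack.push(3);

        // "Return": compute the result, drop the callee's stack section and
        // push the result onto the caller's expression-value queue.
        let result = stack[frames.last().unwrap().base] + 1;
        let callee = frames.pop().unwrap();
        stack.truncate(callee.base);
        frames.last_mut().unwrap().expr_values.push(result);

        assert_eq!(frames.last().unwrap().expr_values, vec![4i64]);
        assert!(stack.is_empty());
    }
}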
 

	
src/protocol/eval/value.rs
Show inline comments
 
@@ -345,256 +345,257 @@ pub(crate) fn apply_binary_operator(store: &mut Store, lhs: &Value, op: BinaryOp
 
        }
 
    }
 

	
 
    macro_rules! apply_int_op_and_return_bool {
 
        ($lhs:ident, $operator_tokens:tt, $operator:ident, $rhs:ident) => {
 
            return match $lhs {
 
                Value::UInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_uint8() ) },
 
                Value::UInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint16()) },
 
                Value::UInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint32()) },
 
                Value::UInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_uint64()) },
 
                Value::SInt8(v)  => { Value::Bool(*v $operator_tokens $rhs.as_sint8() ) },
 
                Value::SInt16(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint16()) },
 
                Value::SInt32(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint32()) },
 
                Value::SInt64(v) => { Value::Bool(*v $operator_tokens $rhs.as_sint64()) },
 
                _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", $operator, $lhs, $rhs)
 
            };
 
        }
 
    }
 

	
 
    // We need to handle concatenate in a special way because it needs the store
 
    // mutably.
 
    if op == BO::Concatenate {
 
        let target_heap_pos = store.alloc_heap();
 
        let lhs_heap_pos;
 
        let rhs_heap_pos;
 

	
 
        let lhs = store.maybe_read_ref(lhs);
 
        let rhs = store.maybe_read_ref(rhs);
 

	
 
        enum ValueKind { Message, String, Array }
 
        let value_kind;
 

	
 
        match lhs {
 
            Value::Message(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_message();
 
                value_kind = ValueKind::Message;
 
            },
 
            Value::String(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_string();
 
                value_kind = ValueKind::String;
 
            },
 
            Value::Array(lhs_pos) => {
 
                lhs_heap_pos = *lhs_pos;
 
                rhs_heap_pos = rhs.as_array();
 
                value_kind = ValueKind::Array;
 
            },
 
            _ => unreachable!("apply_binary_operator {:?} on lhs {:?} and rhs {:?}", op, lhs, rhs)
 
        }
 

	
 
        let lhs_heap_pos = lhs_heap_pos as usize;
 
        let rhs_heap_pos = rhs_heap_pos as usize;
 

	
 
        // TODO: I hate this, but fine...
 
        let mut concatenated = Vec::new();
 
        let lhs_len = store.heap_regions[lhs_heap_pos].values.len();
 
        let rhs_len = store.heap_regions[rhs_heap_pos].values.len();
 
        concatenated.reserve(lhs_len + rhs_len);
 
        for idx in 0..lhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[lhs_heap_pos].values[idx].clone()));
 
        }
 
        for idx in 0..rhs_len {
 
            concatenated.push(store.clone_value(store.heap_regions[rhs_heap_pos].values[idx].clone()));
 
        }
 

	
 
        store.heap_regions[target_heap_pos as usize].values = concatenated;
 

	
 
        return match value_kind{
 
            ValueKind::Message => Value::Message(target_heap_pos),
 
            ValueKind::String => Value::String(target_heap_pos),
 
            ValueKind::Array => Value::Array(target_heap_pos),
 
        };
 
    }
 

	
 
    // If any of the values are references, retrieve the thing they're referring
 
    // to.
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    match op {
 
        BO::Concatenate => unreachable!(),
 
        BO::LogicalOr => {
 
            return Value::Bool(lhs.as_bool() || rhs.as_bool());
 
        },
 
        BO::LogicalAnd => {
 
            return Value::Bool(lhs.as_bool() && rhs.as_bool());
 
        },
 
        BO::BitwiseOr        => { apply_int_op_and_return_self!(lhs, |,  op, rhs); },
 
        BO::BitwiseXor       => { apply_int_op_and_return_self!(lhs, ^,  op, rhs); },
 
        BO::BitwiseAnd       => { apply_int_op_and_return_self!(lhs, &,  op, rhs); },
 
        BO::Equality         => { Value::Bool(apply_equality_operator(store, lhs, rhs)) },
 
        BO::Inequality       => { Value::Bool(apply_inequality_operator(store, lhs, rhs)) },
 
        BO::LessThan         => { apply_int_op_and_return_bool!(lhs, <,  op, rhs); },
 
        BO::GreaterThan      => { apply_int_op_and_return_bool!(lhs, >,  op, rhs); },
 
        BO::LessThanEqual    => { apply_int_op_and_return_bool!(lhs, <=, op, rhs); },
 
        BO::GreaterThanEqual => { apply_int_op_and_return_bool!(lhs, >=, op, rhs); },
 
        BO::ShiftLeft        => { apply_int_op_and_return_self!(lhs, <<, op, rhs); },
 
        BO::ShiftRight       => { apply_int_op_and_return_self!(lhs, >>, op, rhs); },
 
        BO::Add              => { apply_int_op_and_return_self!(lhs, +,  op, rhs); },
 
        BO::Subtract         => { apply_int_op_and_return_self!(lhs, -,  op, rhs); },
 
        BO::Multiply         => { apply_int_op_and_return_self!(lhs, *,  op, rhs); },
 
        BO::Divide           => { apply_int_op_and_return_self!(lhs, /,  op, rhs); },
 
        BO::Remainder        => { apply_int_op_and_return_self!(lhs, %,  op, rhs); }
 
    }
 
}
 

	
 
pub(crate) fn apply_unary_operator(store: &mut Store, op: UnaryOperator, value: &Value) -> Value {
 
    use UnaryOperator as UO;
 

	
 
    macro_rules! apply_int_expr_and_return {
 
        ($value:ident, $apply:tt, $op:ident) => {
 
            return match $value {
 
                Value::UInt8(v)  => Value::UInt8($apply *v),
 
                Value::UInt16(v) => Value::UInt16($apply *v),
 
                Value::UInt32(v) => Value::UInt32($apply *v),
 
                Value::UInt64(v) => Value::UInt64($apply *v),
 
                Value::SInt8(v)  => Value::SInt8($apply *v),
 
                Value::SInt16(v) => Value::SInt16($apply *v),
 
                Value::SInt32(v) => Value::SInt32($apply *v),
 
                Value::SInt64(v) => Value::SInt64($apply *v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", $op, $value),
 
            };
 
        }
 
    }
 

	
 
    // If the value is a reference, retrieve the thing it is referring to
 
    let value = store.maybe_read_ref(value);
 

	
 
    match op {
 
        UO::Positive => {
 
            debug_assert!(value.is_integer());
 
            return value.clone();
 
        },
 
        UO::Negative => {
 
            // TODO: Error on negating unsigned integers
 
            return match value {
 
                Value::SInt8(v) => Value::SInt8(-*v),
 
                Value::SInt16(v) => Value::SInt16(-*v),
 
                Value::SInt32(v) => Value::SInt32(-*v),
 
                Value::SInt64(v) => Value::SInt64(-*v),
 
                _ => unreachable!("apply_unary_operator {:?} on value {:?}", op, value),
 
            }
 
        },
 
        UO::BitwiseNot => { apply_int_expr_and_return!(value, !, op)},
 
        UO::LogicalNot => { return Value::Bool(!value.as_bool()); },
 
        UO::PreIncrement => { todo!("implement") },
 
        UO::PreDecrement => { todo!("implement") },
 
        UO::PostIncrement => { todo!("implement") },
 
        UO::PostDecrement => { todo!("implement") },
 
    }
 
}
 

	
 
pub(crate) fn apply_equality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_equality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return false;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if !apply_equality_operator(store, lhs_val, rhs_val) {
 
                return false;
 
            }
 
        }
 

	
 
        return true;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v == rhs.as_input(),
 
        Value::Output(v) => *v == rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v == rhs.as_bool(),
 
        Value::Char(v) => *v == rhs.as_char(),
 
        Value::String(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v == rhs.as_uint8(),
 
        Value::UInt16(v) => *v == rhs.as_uint16(),
 
        Value::UInt32(v) => *v == rhs.as_uint32(),
 
        Value::UInt64(v) => *v == rhs.as_uint64(),
 
        Value::SInt8(v) => *v == rhs.as_sint8(),
 
        Value::SInt16(v) => *v == rhs.as_sint16(),
 
        Value::SInt32(v) => *v == rhs.as_sint32(),
 
        Value::SInt64(v) => *v == rhs.as_sint64(),
 
        Value::Array(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_array()),
 
        Value::Enum(v) => *v == rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return false;
 
            }
 
            eval_equality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_equality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_equality_operator to lhs {:?}", lhs),
 
    }
 
}
 

	
 
pub(crate) fn apply_inequality_operator(store: &Store, lhs: &Value, rhs: &Value) -> bool {
 
    let lhs = store.maybe_read_ref(lhs);
 
    let rhs = store.maybe_read_ref(rhs);
 

	
 
    fn eval_inequality_heap(store: &Store, lhs_pos: HeapPos, rhs_pos: HeapPos) -> bool {
 
        let lhs_vals = &store.heap_regions[lhs_pos as usize].values;
 
        let rhs_vals = &store.heap_regions[rhs_pos as usize].values;
 
        let lhs_len = lhs_vals.len();
 
        if lhs_len != rhs_vals.len() {
 
            return true;
 
        }
 

	
 
        for idx in 0..lhs_len {
 
            let lhs_val = &lhs_vals[idx];
 
            let rhs_val = &rhs_vals[idx];
 
            if apply_inequality_operator(store, lhs_val, rhs_val) {
 
                return true;
 
            }
 
        }
 

	
 
        return false;
 
    }
 

	
 
    match lhs {
 
        Value::Input(v) => *v != rhs.as_input(),
 
        Value::Output(v) => *v != rhs.as_output(),
 
        Value::Message(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_message()),
 
        Value::Null => todo!("remove null"),
 
        Value::Bool(v) => *v != rhs.as_bool(),
 
        Value::Char(v) => *v != rhs.as_char(),
 
        Value::String(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_string()),
 
        Value::UInt8(v) => *v != rhs.as_uint8(),
 
        Value::UInt16(v) => *v != rhs.as_uint16(),
 
        Value::UInt32(v) => *v != rhs.as_uint32(),
 
        Value::UInt64(v) => *v != rhs.as_uint64(),
 
        Value::SInt8(v) => *v != rhs.as_sint8(),
 
        Value::SInt16(v) => *v != rhs.as_sint16(),
 
        Value::SInt32(v) => *v != rhs.as_sint32(),
 
        Value::SInt64(v) => *v != rhs.as_sint64(),
 
        Value::Enum(v) => *v != rhs.as_enum(),
 
        Value::Union(lhs_tag, lhs_pos) => {
 
            let (rhs_tag, rhs_pos) = rhs.as_union();
 
            if *lhs_tag != rhs_tag {
 
                return true;
 
            }
 
            eval_inequality_heap(store, *lhs_pos, rhs_pos)
 
        },
 
        Value::Struct(lhs_pos) => eval_inequality_heap(store, *lhs_pos, rhs.as_struct()),
 
        _ => unreachable!("apply_inequality_operator to lhs {:?}", lhs)
 
    }
 
}
 
\ No newline at end of file
src/protocol/input_source.rs
Show inline comments
 
@@ -24,451 +24,451 @@ impl InputSpan {
 
    // This will only be used for builtin functions
 
    #[inline]
 
    pub fn new() -> InputSpan {
 
        InputSpan{ begin: InputPosition{ line: 0, offset: 0 }, end: InputPosition{ line: 0, offset: 0 }}
 
    }
 

	
 
    #[inline]
 
    pub fn from_positions(begin: InputPosition, end: InputPosition) -> Self {
 
        Self { begin, end }
 
    }
 
}
 

	
 
/// Wrapper around source file with optional filename. Ensures that the file is
 
/// only scanned once.
 
pub struct InputSource {
 
    pub(crate) filename: String,
 
    pub(crate) input: Vec<u8>,
 
    // Iteration
 
    line: u32,
 
    offset: usize,
 
    // State tracking
 
    pub(crate) had_error: Option<ParseError>,
 
    // The offset_lookup is built on-demand upon attempting to report an error.
 
    // Only one procedure will actually create the lookup, afterwards only read
 
    // locks will be held.
 
    offset_lookup: RwLock<Vec<u32>>,
 
}
 

	
 
impl InputSource {
 
    pub fn new(filename: String, input: Vec<u8>) -> Self {
 
        Self{
 
            filename,
 
            input,
 
            line: 1,
 
            offset: 0,
 
            had_error: None,
 
            offset_lookup: RwLock::new(Vec::new()),
 
        }
 
    }
 

	
 
    #[cfg(test)]
 
    pub fn new_test(input: &str) -> Self {
 
        let bytes = Vec::from(input.as_bytes());
 
        return Self::new(String::from("test"), bytes)
 
    }
 

	
 
    #[inline]
 
    pub fn pos(&self) -> InputPosition {
 
        InputPosition { line: self.line, offset: self.offset as u32 }
 
    }
 

	
 
    pub fn next(&self) -> Option<u8> {
 
        if self.offset < self.input.len() {
 
            Some(self.input[self.offset])
 
        } else {
 
            None
 
        }
 
    }
 

	
 
    pub fn lookahead(&self, offset: usize) -> Option<u8> {
 
        let offset_pos = self.offset + offset;
 
        if offset_pos < self.input.len() {
 
            Some(self.input[offset_pos])
 
        } else {
 
            None
 
        }
 
    }
 

	
 
    #[inline]
 
    pub fn section_at_pos(&self, start: InputPosition, end: InputPosition) -> &[u8] {
 
        &self.input[start.offset as usize..end.offset as usize]
 
    }
 

	
 
    #[inline]
 
    pub fn section_at_span(&self, span: InputSpan) -> &[u8] {
 
        &self.input[span.begin.offset as usize..span.end.offset as usize]
 
    }
 

	
 
    // Consumes the next character. Will check well-formedness of newlines: \r
 
    // must be followed by a \n, because this is used for error reporting. Will
 
    // not check for ascii-ness of the file, better left to a tokenizer.
 
    pub fn consume(&mut self) {
 
        match self.next() {
 
            Some(b'\r') => {
 
                if Some(b'\n') == self.lookahead(1) {
 
                    // Well formed file
 
                    self.offset += 1;
 
                } else {
 
                    // Not a well-formed file, pretend like we can continue
 
                    self.offset += 1;
 
                    self.set_error("Encountered carriage-feed without a following newline");
 
                }
 
            },
 
            Some(b'\n') => {
 
                self.line += 1;
 
                self.offset += 1;
 
            },
 
            Some(_) => {
 
                self.offset += 1;
 
            }
 
            None => {}
 
        }
 

	
 
        // Maybe we actually want to check this in release mode. Then again:
 
        // a 4 gigabyte source file... Really?
 
        debug_assert!(self.offset < u32::max_value() as usize);
 
    }
 

	
 
    fn set_error(&mut self, msg: &str) {
 
        if self.had_error.is_none() {
 
            self.had_error = Some(ParseError::new_error_str_at_pos(self, self.pos(), msg));
 
        }
 
    }
 

	
 
    fn get_lookup(&self) -> RwLockReadGuard<Vec<u32>> {
 
        // Once constructed the lookup always contains at least one element. We
 
        // use this to check whether it has been constructed already.
 
        {
 
            let lookup = self.offset_lookup.read().unwrap();
 
            if !lookup.is_empty() {
 
                return lookup;
 
            }
 
        }
 

	
 
        // Lookup was not constructed yet
 
        let mut lookup = self.offset_lookup.write().unwrap();
 
        if !lookup.is_empty() {
 
            // Somebody created it before we had the chance
 
            drop(lookup);
 
            let lookup = self.offset_lookup.read().unwrap();
 
            return lookup;
 
        }
 

	
 
        // Build the line number (!) to offset lookup: line numbers start at 1,
 
        // so entry 0 is never used. We assume the entire source file has been
 
        // scanned (the most common case) when preallocating.
 
        lookup.reserve(self.line as usize + 2);
 
        lookup.push(0); // line 0: never used
 
        lookup.push(0); // first line: first character
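        // As an example: for the input "ab\ncd\n" the final lookup becomes
        // [0, 0, 3, 6, 7], so line N starts at lookup[N] and its (exclusive)
        // end lies just before lookup[N + 1].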
 

	
 
        for char_idx in 0..self.input.len() {
 
            if self.input[char_idx] == b'\n' {
 
                lookup.push(char_idx as u32 + 1);
 
            }
 
        }
 

	
 
        lookup.push(self.input.len() as u32 + 1); // for lookup_line_end, intentionally adding one character
 
        debug_assert_eq!(self.line as usize + 2, lookup.len(), "remove me: i am a testing assert and sometimes invalid");
 

	
 
        // Return created lookup
 
        drop(lookup);
 
        let lookup = self.offset_lookup.read().unwrap();
 
        return lookup;
 
    }
 

	
 
    /// Retrieves offset at which line starts (right after newline)
 
    fn lookup_line_start_offset(&self, line_number: u32) -> u32 {
 
        let lookup = self.get_lookup();
 
        lookup[line_number as usize]
 
    }
 

	
 
    /// Retrieves offset at which line ends (at the newline character or the
 
    /// preceding carriage return for \r\n-encoded newlines)
 
    fn lookup_line_end_offset(&self, line_number: u32) -> u32 {
 
        let lookup = self.get_lookup();
 
        let offset = lookup[(line_number + 1) as usize] - 1;
 
        let offset_usize = offset as usize;
 

	
 
        // Compensate for the newline and a potentially preceding carriage
 
        // return. Note that the end position is exclusive, so we only need to
 
        // step back over the '\r' of a "\r\n" pair.
 
        if offset_usize > 0 && offset_usize < self.input.len() && self.input[offset_usize] == b'\n' && self.input[offset_usize - 1] == b'\r' {
 
            offset - 1
 
        } else {
 
            offset
 
        }
 
    }
 
}
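
// Usage sketch (hypothetical test, for illustration): scan up to the first
// newline with `next`/`consume` and slice that line back out of the source
// using the tracked positions.
#[cfg(test)]
#[test]
fn input_source_scan_sketch() {
    let mut source = InputSource::new_test("let x = 5;\nlet y = 6;\n");
    let begin = source.pos();
    while let Some(c) = source.next() {
        if c == b'\n' { break; }
        source.consume();
    }
    assert_eq!(source.section_at_pos(begin, source.pos()), &b"let x = 5;"[..]);
    assert_eq!(source.pos().line, 1); // the newline itself was not consumed
}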
 

	
 
#[derive(Debug)]
 
pub enum StatementKind {
 
    Info,
 
    Error
 
}
 

	
 
#[derive(Debug)]
 
pub enum ContextKind {
 
    SingleLine,
 
    MultiLine,
 
}
 

	
 
#[derive(Debug)]
 
pub struct ParseErrorStatement {
 
pub struct ErrorStatement {
 
    pub(crate) statement_kind: StatementKind,
 
    pub(crate) context_kind: ContextKind,
 
    pub(crate) start_line: u32,
 
    pub(crate) start_column: u32,
 
    pub(crate) end_line: u32,
 
    pub(crate) end_column: u32,
 
    pub(crate) filename: String,
 
    pub(crate) context: String,
 
    pub(crate) message: String,
 
}
 

	
 
impl ParseErrorStatement {
 
impl ErrorStatement {
 
    fn from_source_at_pos(statement_kind: StatementKind, source: &InputSource, position: InputPosition, message: String) -> Self {
 
        // Seek line start and end
 
        let line_start = source.lookup_line_start_offset(position.line);
 
        let line_end = source.lookup_line_end_offset(position.line);
 
        let context = Self::create_context(source, line_start as usize, line_end as usize);
 
        debug_assert!(position.offset >= line_start);
 
        let column = position.offset - line_start + 1;
 

	
 
        Self{
 
            statement_kind,
 
            context_kind: ContextKind::SingleLine,
 
            start_line: position.line,
 
            start_column: column,
 
            end_line: position.line,
 
            end_column: column + 1,
 
            filename: source.filename.clone(),
 
            context,
 
            message,
 
        }
 
    }
 

	
 
    fn from_source_at_span(statement_kind: StatementKind, source: &InputSource, span: InputSpan, message: String) -> Self {
 
    pub(crate) fn from_source_at_span(statement_kind: StatementKind, source: &InputSource, span: InputSpan, message: String) -> Self {
 
        debug_assert!(span.end.line >= span.begin.line);
 
        debug_assert!(span.end.offset >= span.begin.offset);
 

	
 
        let first_line_start = source.lookup_line_start_offset(span.begin.line);
 
        let last_line_start = source.lookup_line_start_offset(span.end.line);
 
        let last_line_end = source.lookup_line_end_offset(span.end.line);
 
        let context = Self::create_context(source, first_line_start as usize, last_line_end as usize);
 
        debug_assert!(span.begin.offset >= first_line_start);
 
        let start_column = span.begin.offset - first_line_start + 1;
 
        let end_column = span.end.offset - last_line_start + 1;
 

	
 
        let context_kind = if span.begin.line == span.end.line {
 
            ContextKind::SingleLine
 
        } else {
 
            ContextKind::MultiLine
 
        };
 

	
 
        Self{
 
            statement_kind,
 
            context_kind,
 
            start_line: span.begin.line,
 
            start_column,
 
            end_line: span.end.line,
 
            end_column,
 
            filename: source.filename.clone(),
 
            context,
 
            message,
 
        }
 
    }
 

	
 
    /// Produces context from source
 
    fn create_context(source: &InputSource, start: usize, end: usize) -> String {
 
        let context_raw = &source.input[start..end];
 
        String::from_utf8_lossy(context_raw).to_string()
 
    }
 
}
 

	
 
impl fmt::Display for ParseErrorStatement {
 
impl fmt::Display for ErrorStatement {
 
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 
        // Write kind of statement and message
 
        match self.statement_kind {
 
            StatementKind::Info => f.write_str(" INFO: ")?,
 
            StatementKind::Error => f.write_str("ERROR: ")?,
 
        }
 
        f.write_str(&self.message)?;
 
        f.write_char('\n')?;
 

	
 
        // Write originating file/line/column
 
        f.write_str(" +- ")?;
 
        if !self.filename.is_empty() {
 
            write!(f, "in {} ", self.filename)?;
 
        }
 

	
 
        match self.context_kind {
 
            ContextKind::SingleLine => writeln!(f, " at {}:{}", self.start_line, self.start_column),
 
            ContextKind::MultiLine => writeln!(
 
                f, " from {}:{} to {}:{}",
 
                self.start_line, self.start_column, self.end_line, self.end_column
 
            )
 
        }?;
 

	
 
        // Helper function for writing context: converting tabs into 4 spaces
 
        // (oh, the controversy!) and creating an annotated line
 
        fn transform_context(source: &str, target: &mut String) {
 
            for char in source.chars() {
 
                if char == '\t' {
 
                    target.push_str("    ");
 
                } else {
 
                    target.push(char);
 
                }
 
            }
 
        }
 

	
 
        fn extend_annotation(first_col: u32, last_col: u32, source: &str, target: &mut String, extend_char: char) {
 
            debug_assert!(first_col > 0 && last_col > first_col);
 

	
 
            // If the first index exceeds the size of the context then we should
 
            // have a message placed at the newline character
 
            let first_idx = first_col as usize - 1;
 
            let last_idx = last_col as usize - 1;
 
            if first_idx >= source.len() {
 
                // If any of these fail then the logic behind reporting errors
 
                // is incorrect.
 
                debug_assert_eq!(first_idx, source.len());
 
                debug_assert_eq!(first_idx + 1, last_idx);
 
                target.push(extend_char);
 
            } else {
 
                for (char_idx, char) in source.chars().enumerate().skip(first_idx) {
 
                    if char_idx == last_idx as usize {
 
                        break;
 
                    }
 

	
 
                    if char == '\t' {
 
                        for _ in 0..4 { target.push(extend_char); }
 
                    } else {
 
                        target.push(extend_char);
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // Write source context
 
        writeln!(f, " | ")?;
 

	
 
        let mut context = String::with_capacity(128);
 
        let mut annotation = String::with_capacity(128);
 

	
 
        match self.context_kind {
 
            ContextKind::SingleLine => {
 
                // Write single line of context with indicator for the offending
 
                // span underneath.
 
                context.push_str(" |  ");
 
                transform_context(&self.context, &mut context);
 
                context.push('\n');
 
                f.write_str(&context)?;
 

	
 
                annotation.push_str(" | ");
 
                extend_annotation(1, self.start_column + 1, &self.context, &mut annotation, ' ');
 
                extend_annotation(self.start_column, self.end_column, &self.context, &mut annotation, '~');
 
                annotation.push('\n');
 

	
 
                f.write_str(&annotation)?;
 
            },
 
            ContextKind::MultiLine => {
 
                // Annotate all offending lines
 
                // - first line
 
                let mut lines = self.context.lines();
 
                let first_line = lines.next().unwrap();
 
                transform_context(first_line, &mut context);
 
                writeln!(f, " |- {}", &context)?;
 

	
 
                // - remaining lines
 
                let mut last_line = first_line;
 
                while let Some(cur_line) = lines.next() {
 
                    context.clear();
 
                    transform_context(cur_line, &mut context);
 
                    writeln!(f, " |  {}", &context)?;
 
                    last_line = cur_line;
 
                }
 

	
 
                // - underline beneath last line
 
                annotation.push_str(" \\__");
 
                extend_annotation(1, self.end_column, &last_line, &mut annotation, '_');
 
                annotation.push_str("/\n");
 
                f.write_str(&annotation)?;
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 
}
 

	
 
#[derive(Debug)]
 
pub struct ParseError {
 
    pub(crate) statements: Vec<ParseErrorStatement>
 
    pub(crate) statements: Vec<ErrorStatement>
 
}
 

	
 
impl fmt::Display for ParseError {
 
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 
        if self.statements.is_empty() {
 
            return Ok(())
 
        }
 

	
 
        self.statements[0].fmt(f)?;
 
        for statement in self.statements.iter().skip(1) {
 
            writeln!(f)?;
 
            statement.fmt(f)?;
 
        }
 

	
 
        Ok(())
 
    }
 
}
 

	
 
impl ParseError {
 
    pub fn new_error_at_pos(source: &InputSource, position: InputPosition, message: String) -> Self {
 
        Self{ statements: vec!(ParseErrorStatement::from_source_at_pos(
 
        Self{ statements: vec!(ErrorStatement::from_source_at_pos(
 
            StatementKind::Error, source, position, message
 
        )) }
 
    }
 

	
 
    pub fn new_error_str_at_pos(source: &InputSource, position: InputPosition, message: &str) -> Self {
 
        Self{ statements: vec!(ParseErrorStatement::from_source_at_pos(
 
        Self{ statements: vec!(ErrorStatement::from_source_at_pos(
 
            StatementKind::Error, source, position, message.to_string()
 
        )) }
 
    }
 

	
 
    pub fn new_error_at_span(source: &InputSource, span: InputSpan, message: String) -> Self {
 
        Self{ statements: vec!(ParseErrorStatement::from_source_at_span(
 
        Self{ statements: vec!(ErrorStatement::from_source_at_span(
 
            StatementKind::Error, source, span, message
 
        )) }
 
    }
 

	
 
    pub fn new_error_str_at_span(source: &InputSource, span: InputSpan, message: &str) -> Self {
 
        Self{ statements: vec!(ParseErrorStatement::from_source_at_span(
 
        Self{ statements: vec!(ErrorStatement::from_source_at_span(
 
            StatementKind::Error, source, span, message.to_string()
 
        )) }
 
    }
 

	
 
    pub fn with_at_pos(mut self, error_type: StatementKind, source: &InputSource, position: InputPosition, message: String) -> Self {
 
        self.statements.push(ParseErrorStatement::from_source_at_pos(error_type, source, position, message));
 
        self.statements.push(ErrorStatement::from_source_at_pos(error_type, source, position, message));
 
        self
 
    }
 

	
 
    pub fn with_at_span(mut self, error_type: StatementKind, source: &InputSource, span: InputSpan, message: String) -> Self {
 
        self.statements.push(ParseErrorStatement::from_source_at_span(error_type, source, span, message.to_string()));
 
        self.statements.push(ErrorStatement::from_source_at_span(error_type, source, span, message.to_string()));
 
        self
 
    }
 

	
 
    pub fn with_info_at_pos(self, source: &InputSource, position: InputPosition, msg: String) -> Self {
 
        self.with_at_pos(StatementKind::Info, source, position, msg)
 
    }
 

	
 
    pub fn with_info_str_at_pos(self, source: &InputSource, position: InputPosition, msg: &str) -> Self {
 
        self.with_at_pos(StatementKind::Info, source, position, msg.to_string())
 
    }
 

	
 
    pub fn with_info_at_span(self, source: &InputSource, span: InputSpan, msg: String) -> Self {
 
        self.with_at_span(StatementKind::Info, source, span, msg)
 
    }
 

	
 
    pub fn with_info_str_at_span(self, source: &InputSource, span: InputSpan, msg: &str) -> Self {
 
        self.with_at_span(StatementKind::Info, source, span, msg.to_string())
 
    }
 
}
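
// Usage sketch (hypothetical test, for illustration): a ParseError carries one
// primary error statement plus any number of info statements, and is rendered
// through its Display impl.
#[cfg(test)]
#[test]
fn parse_error_usage_sketch() {
    let mut source = InputSource::new_test("u32 x = 5;\nu32 x = 6;\n");
    while source.next().is_some() { source.consume(); } // a tokenizer would normally advance the source
    let redefinition = InputSpan::from_positions(
        InputPosition{ line: 2, offset: 11 },
        InputPosition{ line: 2, offset: 16 },
    );
    let error = ParseError::new_error_str_at_span(&source, redefinition, "variable redefined")
        .with_info_str_at_pos(&source, InputPosition{ line: 1, offset: 0 }, "previous definition was here");
    println!("{}", error);
}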
src/protocol/mod.rs
Show inline comments
 
mod arena;
 
mod eval;
 
pub(crate) mod input_source;
 
mod parser;
 
#[cfg(test)] mod tests;
 

	
 
pub(crate) mod ast;
 
pub(crate) mod ast_printer;
 

	
 
use std::sync::Mutex;
 

	
 
use crate::collections::{StringPool, StringRef};
 
use crate::common::*;
 
use crate::protocol::ast::*;
 
use crate::protocol::eval::*;
 
use crate::protocol::input_source::*;
 
use crate::protocol::parser::*;
 

	
 
/// A protocol description module
 
pub struct Module {
 
    pub(crate) source: InputSource,
 
    pub(crate) root_id: RootId,
 
    pub(crate) name: Option<StringRef<'static>>,
 
}
 
/// Description of a protocol object, used to configure new connectors.
 
#[repr(C)]
 
pub struct ProtocolDescription {
 
    modules: Vec<Module>,
 
    heap: Heap,
 
    source: InputSource,
 
    root: RootId,
 
    pool: Mutex<StringPool>,
 
}
 
#[derive(Debug, Clone)]
 
pub(crate) struct ComponentState {
 
    prompt: Prompt,
 
}
 
pub(crate) enum EvalContext<'a> {
 
    Nonsync(&'a mut NonsyncProtoContext<'a>),
 
    Sync(&'a mut SyncProtoContext<'a>),
 
    None,
 
}
 
//////////////////////////////////////////////
 

	
 
impl std::fmt::Debug for ProtocolDescription {
 
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
 
        write!(f, "(An opaque protocol description)")
 
    }
 
}
 
impl ProtocolDescription {
 
    // TODO: Allow for multi-file compilation
 
    pub fn parse(buffer: &[u8]) -> Result<Self, String> {
 
        // TODO: @fixme, keep code compilable, but needs support for multiple
 
        //  input files.
 
        let source = InputSource::new(String::new(), Vec::from(buffer));
 
        let mut parser = Parser::new();
 
        parser.feed(source).expect("failed to feed source");
 
        
 
        if let Err(err) = parser.parse() {
 
            println!("ERROR:\n{}", err);
 
            return Err(format!("{}", err))
 
        }
 

	
 
        debug_assert_eq!(parser.modules.len(), 1, "only supporting one module here for now");
 
        let module = parser.modules.remove(0);
 
        let root = module.root_id;
 
        let source = module.source;
 
        return Ok(ProtocolDescription { heap: parser.heap, source, root });
 
        let modules: Vec<Module> = parser.modules.into_iter()
 
            .map(|module| Module{
 
                source: module.source,
 
                root_id: module.root_id,
 
                name: module.name.map(|(_, name)| name)
 
            })
 
            .collect();
 

	
 
        return Ok(ProtocolDescription {
 
            modules,
 
            heap: parser.heap,
 
            pool: Mutex::new(parser.string_pool),
 
        });
 
    }
 
    pub(crate) fn component_polarities(
 
        &self,
 
        module_name: &[u8],
 
        identifier: &[u8],
 
    ) -> Result<Vec<Polarity>, AddComponentError> {
 
        use AddComponentError::*;
 
        let h = &self.heap;
 
        let root = &h[self.root];
 
        let def = root.get_definition_ident(h, identifier);
 

	
 
        let module_root = self.lookup_module_root(module_name);
 
        if module_root.is_none() {
 
            return Err(AddComponentError::NoSuchModule);
 
        }
 
        let module_root = module_root.unwrap();
 

	
 
        let root = &self.heap[module_root];
 
        let def = root.get_definition_ident(&self.heap, identifier);
 
        if def.is_none() {
 
            return Err(NoSuchComponent);
 
        }
 
        let def = &h[def.unwrap()];
 

	
 
        let def = &self.heap[def.unwrap()];
 
        if !def.is_component() {
 
            return Err(NoSuchComponent);
 
        }
 

	
 
        for &param in def.parameters().iter() {
 
            let param = &h[param];
 
            let param = &self.heap[param];
 
            let first_element = &param.parser_type.elements[0];
 

	
 
            match first_element.variant {
 
                ParserTypeVariant::Input | ParserTypeVariant::Output => continue,
 
                _ => {
 
                    return Err(NonPortTypeParameters);
 
                }
 
            }
 
        }
 

	
 
        let mut result = Vec::new();
 
        for &param in def.parameters().iter() {
 
            let param = &h[param];
 
            let param = &self.heap[param];
 
            let first_element = &param.parser_type.elements[0];
 

	
 
            if first_element.variant == ParserTypeVariant::Input {
 
                result.push(Polarity::Getter)
 
            } else if first_element.variant == ParserTypeVariant::Output {
 
                result.push(Polarity::Putter)
 
            } else {
 
                unreachable!()
 
            }
 
        }
 
        Ok(result)
 
    }
 
    // expects port polarities to be correct
 
    pub(crate) fn new_component(&self, identifier: &[u8], ports: &[PortId]) -> ComponentState {
 
    pub(crate) fn new_component(&self, module_name: &[u8], identifier: &[u8], ports: &[PortId]) -> ComponentState {
 
        let mut args = Vec::new();
 
        for (&x, y) in ports.iter().zip(self.component_polarities(identifier).unwrap()) {
 
        for (&x, y) in ports.iter().zip(self.component_polarities(module_name, identifier).unwrap()) {
 
            match y {
 
                Polarity::Getter => args.push(Value::Input(x)),
 
                Polarity::Putter => args.push(Value::Output(x)),
 
            }
 
        }
 
        let h = &self.heap;
 
        let root = &h[self.root];
 
        let def = root.get_definition_ident(h, identifier).unwrap();
 
        ComponentState { prompt: Prompt::new(h, def, ValueGroup::new_stack(args)) }
 

	
 
        let module_root = self.lookup_module_root(module_name).unwrap();
 
        let root = &self.heap[module_root];
 
        let def = root.get_definition_ident(&self.heap, identifier).unwrap();
 
        ComponentState { prompt: Prompt::new(&self.heap, def, ValueGroup::new_stack(args)) }
 
    }
 

	
 
    fn lookup_module_root(&self, module_name: &[u8]) -> Option<RootId> {
 
        for module in self.modules.iter() {
 
            match &module.name {
 
                Some(name) => if name.as_bytes() == module_name {
 
                    return Some(module.root_id);
 
                },
 
                None => if module_name.is_empty() {
 
                    return Some(module.root_id);
 
                }
 
            }
 
        }
 

	
 
        return None;
 
    }
 
}
 
impl ComponentState {
 
    pub(crate) fn nonsync_run<'a: 'b, 'b>(
 
        &'a mut self,
 
        context: &'b mut NonsyncProtoContext<'b>,
 
        pd: &'a ProtocolDescription,
 
    ) -> NonsyncBlocker {
 
        let mut context = EvalContext::Nonsync(context);
 
        loop {
 
            let result = self.prompt.step(&pd.heap, &mut context);
 
            let result = self.prompt.step(&pd.heap, &pd.modules, &mut context);
 
            match result {
 
                Err(_) => todo!("error handling"),
 
                Err(err) => {
 
                    println!("Evaluation error:\n{}", err);
 
                    panic!("proper error handling when component fails");
 
                },
 
                Ok(cont) => match cont {
 
                    EvalContinuation::Stepping => continue,
 
                    EvalContinuation::Inconsistent => return NonsyncBlocker::Inconsistent,
 
                    EvalContinuation::Terminal => return NonsyncBlocker::ComponentExit,
 
                    EvalContinuation::SyncBlockStart => return NonsyncBlocker::SyncBlockStart,
 
                    // Not possible to end sync block if never entered one
 
                    EvalContinuation::SyncBlockEnd => unreachable!(),
 
                    EvalContinuation::NewComponent(definition_id, args) => {
 
                        // Look up definition (TODO for now, assume it is a definition)
 
                        let mut moved_ports = HashSet::new();
 
                        for arg in args.values.iter() {
 
                            match arg {
 
                                Value::Output(port) => {
 
                                    moved_ports.insert(*port);
 
                                }
 
                                Value::Input(port) => {
 
                                    moved_ports.insert(*port);
 
                                }
 
                                _ => {}
 
                            }
 
                        }
 
                        for region in args.regions.iter() {
 
                            for arg in region {
 
                                match arg {
 
                                    Value::Output(port) => { moved_ports.insert(*port); },
 
                                    Value::Input(port) => { moved_ports.insert(*port); },
 
                                    _ => {},
 
                                }
 
                            }
 
                        }
 
                        let h = &pd.heap;
 
                        let init_state = ComponentState { prompt: Prompt::new(h, definition_id, args) };
 
                        context.new_component(moved_ports, init_state);
 
                        // Continue stepping
 
                        continue;
 
                    }
 
                    // Outside synchronous blocks, no fires/get/put happens
 
                    EvalContinuation::BlockFires(_) => unreachable!(),
 
                    EvalContinuation::BlockGet(_) => unreachable!(),
 
                    EvalContinuation::Put(_, _) => unreachable!(),
 
                },
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn sync_run<'a: 'b, 'b>(
 
        &'a mut self,
 
        context: &'b mut SyncProtoContext<'b>,
 
        pd: &'a ProtocolDescription,
 
    ) -> SyncBlocker {
 
        let mut context = EvalContext::Sync(context);
 
        loop {
 
            let result = self.prompt.step(&pd.heap, &mut context);
 
            let result = self.prompt.step(&pd.heap, &pd.modules, &mut context);
 
            match result {
 
                Err(_) => todo!("error handling"),
 
                Err(err) => {
 
                    println!("Evaluation error:\n{}", err);
 
                    panic!("proper error handling when component fails");
 
                },
 
                Ok(cont) => match cont {
 
                    EvalContinuation::Stepping => continue,
 
                    EvalContinuation::Inconsistent => return SyncBlocker::Inconsistent,
 
                    // First need to exit synchronous block before definition may end
 
                    EvalContinuation::Terminal => unreachable!(),
 
                    // No nested synchronous blocks
 
                    EvalContinuation::SyncBlockStart => unreachable!(),
 
                    EvalContinuation::SyncBlockEnd => return SyncBlocker::SyncBlockEnd,
 
                    // Not possible to create component in sync block
 
                    EvalContinuation::NewComponent(_, _) => unreachable!(),
 
                    EvalContinuation::BlockFires(port) => match port {
 
                        Value::Output(port) => {
 
                            return SyncBlocker::CouldntCheckFiring(port);
 
                        }
 
                        Value::Input(port) => {
 
                            return SyncBlocker::CouldntCheckFiring(port);
 
                        }
 
                        _ => unreachable!(),
 
                    },
 
                    EvalContinuation::BlockGet(port) => match port {
 
                        Value::Output(port) => {
 
                            return SyncBlocker::CouldntReadMsg(port);
 
                        }
 
                        Value::Input(port) => {
 
                            return SyncBlocker::CouldntReadMsg(port);
 
                        }
 
                        _ => unreachable!(),
 
                    },
 
                    EvalContinuation::Put(port, message) => {
 
                        let value;
 
                        match port {
 
                            Value::Output(port_value) => {
 
                                value = port_value;
 
                            }
 
                            Value::Input(port_value) => {
 
                                value = port_value;
 
                            }
 
                            _ => unreachable!(),
 
                        }
 
                        let payload;
 
                        match message {
 
                            Value::Null => {
 
                                return SyncBlocker::Inconsistent;
 
                            },
 
                            Value::Message(heap_pos) => {
 
                                // Create a copy of the payload
 
                                let values = &self.prompt.store.heap_regions[heap_pos as usize].values;
 
                                let mut bytes = Vec::with_capacity(values.len());
 
                                for value in values {
 
                                    bytes.push(value.as_uint8());
 
                                }
 
                                payload = Payload(Arc::new(bytes));
 
                            }
 
                            _ => unreachable!(),
 
                        }
 
                        return SyncBlocker::PutMsg(value, payload);
 
                    }
 
                },
 
            }
 
        }
 
    }
 
}
 
impl EvalContext<'_> {
 
    // fn random(&mut self) -> LongValue {
 
    //     match self {
 
    //         // EvalContext::None => unreachable!(),
 
    //         EvalContext::Nonsync(_context) => todo!(),
 
    //         EvalContext::Sync(_) => unreachable!(),
 
    //     }
 
    // }
 
    fn new_component(&mut self, moved_ports: HashSet<PortId>, init_state: ComponentState) -> () {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(context) => {
 
                context.new_component(moved_ports, init_state)
 
            }
 
            EvalContext::Sync(_) => unreachable!(),
 
        }
 
    }
 
    fn new_channel(&mut self) -> [Value; 2] {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(context) => {
 
                let [from, to] = context.new_port_pair();
 
                let from = Value::Output(from);
 
                let to = Value::Input(to);
 
                return [from, to];
 
            }
 
            EvalContext::Sync(_) => unreachable!(),
 
        }
 
    }
 
    fn fires(&mut self, port: Value) -> Option<Value> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(context) => match port {
 
                Value::Output(port) => context.is_firing(port).map(Value::Bool),
 
                Value::Input(port) => context.is_firing(port).map(Value::Bool),
 
                _ => unreachable!(),
 
            },
 
        }
 
    }
 
    fn get(&mut self, port: Value, store: &mut Store) -> Option<Value> {
 
        match self {
 
            EvalContext::None => unreachable!(),
 
            EvalContext::Nonsync(_) => unreachable!(),
 
            EvalContext::Sync(context) => match port {
 
                Value::Output(port) => {
 
                    debug_assert!(false, "Getting from an output port? Am I mad?");
 
                    unreachable!();
 
                }
 
                Value::Input(port) => {
 
                    let heap_pos = store.alloc_heap();
 
                    let heap_pos_usize = heap_pos as usize;
 

	
 
                    let payload = context.read_msg(port);
 
                    if payload.is_none() { return None; }
 

	
 
                    let payload = payload.unwrap();
 
                    store.heap_regions[heap_pos_usize].values.reserve(payload.0.len());
 
                    for value in payload.0.iter() {
 
                        store.heap_regions[heap_pos_usize].values.push(Value::UInt8(*value));
 
                    }
 

	
 
                    return Some(Value::Message(heap_pos));
 
                }
 
                _ => unreachable!(),
 
            },
 
        }
 
    }
 
    fn did_put(&mut self, port: Value) -> bool {
 
        match self {
 
            EvalContext::None => unreachable!("did_put in None context"),
 
            EvalContext::Nonsync(_) => unreachable!("did_put in nonsync context"),
 
            EvalContext::Sync(context) => match port {
 
                Value::Output(port) => {
 
                    context.did_put_or_get(port)
 
                },
 
                Value::Input(_) => unreachable!("did_put on input port"),
 
                _ => unreachable!("did_put on non-port value")
 
            }
 
        }
 
    }
 
}
src/protocol/parser/pass_definitions.rs
Show inline comments
 
@@ -207,412 +207,413 @@ impl PassDefinitions {
 
            |source, iter, ctx| {
 
                let identifier = consume_ident_interned(source, iter, ctx)?;
 
                let mut close_pos = identifier.span.end;
 

	
 
                let mut types_section = self.parser_types.start_section();
 

	
 
                let has_embedded = maybe_consume_comma_separated(
 
                    TokenKind::OpenParen, TokenKind::CloseParen, source, iter, ctx,
 
                    |source, iter, ctx| {
 
                        let poly_vars = ctx.heap[definition_id].poly_vars(); // TODO: @Cleanup, this is really ugly. But rust...
 
                        consume_parser_type(
 
                            source, iter, &ctx.symbols, &ctx.heap, poly_vars,
 
                            module_scope, definition_id, false, 0
 
                        )
 
                    },
 
                    &mut types_section, "an embedded type", Some(&mut close_pos)
 
                )?;
 
                let value = if has_embedded {
 
                    UnionVariantValue::Embedded(types_section.into_vec())
 
                } else {
 
                    types_section.forget();
 
                    UnionVariantValue::None
 
                };
 

	
 
                Ok(UnionVariantDefinition{
 
                    span: InputSpan::from_positions(identifier.span.begin, close_pos),
 
                    identifier,
 
                    value
 
                })
 
            },
 
            &mut variants_section, "a union variant", "a list of union variants", None
 
        )?;
 

	
 
        // Transfer to AST
 
        let union_def = ctx.heap[definition_id].as_union_mut();
 
        union_def.variants = variants_section.into_vec();
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_function_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        consume_exact_ident(&module.source, iter, KW_FUNCTION)?;
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated DefinitionId
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        // Parse function's argument list
 
        let mut parameter_section = self.variables.start_section();
 
        consume_parameter_list(
 
            &module.source, iter, ctx, &mut parameter_section, module_scope, definition_id
 
        )?;
 
        let parameters = parameter_section.into_vec();
 

	
 
        // Consume return types
 
        consume_token(&module.source, iter, TokenKind::ArrowRight)?;
 
        let mut return_types = self.parser_types.start_section();
 
        let mut open_curly_pos = iter.last_valid_pos(); // bogus value
 
        consume_comma_separated_until(
 
            TokenKind::OpenCurly, &module.source, iter, ctx,
 
            |source, iter, ctx| {
 
                let poly_vars = ctx.heap[definition_id].poly_vars(); // TODO: @Cleanup, this is really ugly. But rust...
 
                consume_parser_type(source, iter, &ctx.symbols, &ctx.heap, poly_vars, module_scope, definition_id, false, 0)
 
            },
 
            &mut return_types, "a return type", Some(&mut open_curly_pos)
 
        )?;
 
        let return_types = return_types.into_vec();
 

	
 
        // TODO: @ReturnValues
 
        match return_types.len() {
 
            0 => return Err(ParseError::new_error_str_at_pos(&module.source, open_curly_pos, "expected a return type")),
 
            1 => {},
 
            _ => return Err(ParseError::new_error_str_at_pos(&module.source, open_curly_pos, "multiple return types are not (yet) allowed")),
 
        }
 

	
 
        // Consume block
 
        let body = self.consume_block_statement_without_leading_curly(module, iter, ctx, open_curly_pos)?;
 

	
 
        // Assign everything in the preallocated AST node
 
        let function = ctx.heap[definition_id].as_function_mut();
 
        function.return_types = return_types;
 
        function.parameters = parameters;
 
        function.body = body;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_component_definition(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<(), ParseError> {
 
        let (_variant_text, _) = consume_any_ident(&module.source, iter)?;
 
        debug_assert!(_variant_text == KW_PRIMITIVE || _variant_text == KW_COMPOSITE);
 
        let (ident_text, _) = consume_ident(&module.source, iter)?;
 

	
 
        // Retrieve preallocated definition
 
        let module_scope = SymbolScope::Module(module.root_id);
 
        let definition_id = ctx.symbols.get_symbol_by_name_defined_in_scope(module_scope, ident_text)
 
            .unwrap().variant.as_definition().definition_id;
 
        self.cur_definition = definition_id;
 

	
 
        consume_polymorphic_vars_spilled(&module.source, iter, ctx)?;
 

	
 
        // Parse component's argument list
 
        let mut parameter_section = self.variables.start_section();
 
        consume_parameter_list(
 
            &module.source, iter, ctx, &mut parameter_section, module_scope, definition_id
 
        )?;
 
        let parameters = parameter_section.into_vec();
 

	
 
        // Consume block
 
        let body = self.consume_block_statement(module, iter, ctx)?;
 

	
 
        // Assign everything in the AST node
 
        let component = ctx.heap[definition_id].as_component_mut();
 
        component.parameters = parameters;
 
        component.body = body;
 

	
 
        Ok(())
 
    }
 

	
 
    /// Consumes a statement. If the consumed statement is not a block (e.g. the
 
    /// shorthand "if (expr) single_statement") then it is wrapped in an implicit
 
    /// block.
 
    fn consume_block_or_wrapped_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<BlockStatementId, ParseError> {
 
        if Some(TokenKind::OpenCurly) == iter.next() {
 
            // This is a block statement
 
            self.consume_block_statement(module, iter, ctx)
 
        } else {
 
            // Not a block statement, so wrap it in one
 
            let mut statements = self.statements.start_section();
 
            let wrap_begin_pos = iter.last_valid_pos();
 
            self.consume_statement(module, iter, ctx, &mut statements)?;
 
            let wrap_end_pos = iter.last_valid_pos();
 

	
 
            debug_assert_eq!(statements.len(), 1);
 
            let statements = statements.into_vec();
 

	
 
            let id = ctx.heap.alloc_block_statement(|this| BlockStatement{
 
                this,
 
                is_implicit: true,
 
                span: InputSpan::from_positions(wrap_begin_pos, wrap_end_pos), // TODO: @Span
 
                statements,
 
                end_block: EndBlockStatementId::new_invalid(),
 
                parent_scope: Scope::Definition(DefinitionId::new_invalid()),
 
                first_unique_id_in_scope: -1,
 
                next_unique_id_in_scope: -1,
 
                relative_pos_in_parent: 0,
 
                locals: Vec::new(),
 
                labels: Vec::new()
 
            });
 

	
 
            let end_block = ctx.heap.alloc_end_block_statement(|this| EndBlockStatement{
 
                this, start_block: id, next: StatementId::new_invalid()
 
            });
 

	
 
            let block_stmt = &mut ctx.heap[id];
 
            block_stmt.end_block = end_block;
 

	
 
            Ok(id)
 
        }
 
    }
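    // Illustrative sketch (not verbatim source): for the shorthand
    // `if (expr) single_statement;` the wrapping above produces roughly
    //
    //     BlockStatement {
    //         is_implicit: true,
    //         statements: vec![single_statement_id],
    //         end_block: /* freshly allocated EndBlockStatement */,
    //         ..
    //     }
    //
    // so that later passes can treat shorthand and explicit bodies uniformly.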
 

	
 
    /// Consumes a single statement and pushes it, together with any implicit
 
    /// companion statements (e.g. end-if/end-while markers), onto the provided section.
 
    fn consume_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx, section: &mut ScopedSection<StatementId>
 
    ) -> Result<(), ParseError> {
 
        let next = iter.next().expect("consume_statement has a next token");
 

	
 
        if next == TokenKind::OpenCurly {
 
            let id = self.consume_block_statement(module, iter, ctx)?;
 
            section.push(id.upcast());
 
        } else if next == TokenKind::Ident {
 
            let ident = peek_ident(&module.source, iter).unwrap();
 
            if ident == KW_STMT_IF {
 
                // Consume if statement and place end-if statement directly
 
                // after it.
 
                let id = self.consume_if_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 

	
 
                let end_if = ctx.heap.alloc_end_if_statement(|this| EndIfStatement{
 
                    this, start_if: id, next: StatementId::new_invalid()
 
                });
 
                section.push(end_if.upcast());
 

	
 
                let if_stmt = &mut ctx.heap[id];
 
                if_stmt.end_if = end_if;
 
            } else if ident == KW_STMT_WHILE {
 
                let id = self.consume_while_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 

	
 
                let end_while = ctx.heap.alloc_end_while_statement(|this| EndWhileStatement{
 
                    this, start_while: id, next: StatementId::new_invalid()
 
                });
 
                section.push(end_while.upcast());
 

	
 
                let while_stmt = &mut ctx.heap[id];
 
                while_stmt.end_while = end_while;
 
            } else if ident == KW_STMT_BREAK {
 
                let id = self.consume_break_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 
            } else if ident == KW_STMT_CONTINUE {
 
                let id = self.consume_continue_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 
            } else if ident == KW_STMT_SYNC {
 
                let id = self.consume_synchronous_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 

	
 
                let end_sync = ctx.heap.alloc_end_synchronous_statement(|this| EndSynchronousStatement{
 
                    this, start_sync: id, next: StatementId::new_invalid()
 
                });
 
                section.push(end_sync.upcast());
 

	
 
                let sync_stmt = &mut ctx.heap[id];
 
                sync_stmt.end_sync = end_sync;
 
            } else if ident == KW_STMT_RETURN {
 
                let id = self.consume_return_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 
            } else if ident == KW_STMT_GOTO {
 
                let id = self.consume_goto_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 
            } else if ident == KW_STMT_NEW {
 
                let id = self.consume_new_statement(module, iter, ctx)?;
 
                section.push(id.upcast());
 
            } else if ident == KW_STMT_CHANNEL {
 
                let id = self.consume_channel_statement(module, iter, ctx)?;
 
                section.push(id.upcast().upcast());
 
            } else if iter.peek() == Some(TokenKind::Colon) {
 
                self.consume_labeled_statement(module, iter, ctx, section)?;
 
            } else {
 
                // Two fallback possibilities: the first one is a memory
 
                // declaration, the other one is to parse it as a regular
 
                // expression. This is a bit ugly
 
                if let Some((memory_stmt_id, assignment_stmt_id)) = self.maybe_consume_memory_statement(module, iter, ctx)? {
 
                    section.push(memory_stmt_id.upcast().upcast());
 
                    section.push(assignment_stmt_id.upcast());
 
                } else {
 
                    let id = self.consume_expression_statement(module, iter, ctx)?;
 
                    section.push(id.upcast());
 
                }
 
            }
 
        } else {
 
            let id = self.consume_expression_statement(module, iter, ctx)?;
 
            section.push(id.upcast());
 
        }
 

	
 
        return Ok(());
 
    }
 

	
 
    fn consume_block_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<BlockStatementId, ParseError> {
 
        let open_span = consume_token(&module.source, iter, TokenKind::OpenCurly)?;
 
        self.consume_block_statement_without_leading_curly(module, iter, ctx, open_span.begin)
 
    }
 

	
 
    fn consume_block_statement_without_leading_curly(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx, open_curly_pos: InputPosition
 
    ) -> Result<BlockStatementId, ParseError> {
 
        let mut stmt_section = self.statements.start_section();
 
        let mut next = iter.next();
 
        while next != Some(TokenKind::CloseCurly) {
 
            if next.is_none() {
 
                return Err(ParseError::new_error_str_at_pos(
 
                    &module.source, iter.last_valid_pos(), "expected a statement or '}'"
 
                ));
 
            }
 
            self.consume_statement(module, iter, ctx, &mut stmt_section)?;
 
            next = iter.next();
 
        }
 

	
 
        let statements = stmt_section.into_vec();
 
        let mut block_span = consume_token(&module.source, iter, TokenKind::CloseCurly)?;
 
        block_span.begin = open_curly_pos;
 

	
 
        let id = ctx.heap.alloc_block_statement(|this| BlockStatement{
 
            this,
 
            is_implicit: false,
 
            span: block_span,
 
            statements,
 
            end_block: EndBlockStatementId::new_invalid(),
 
            parent_scope: Scope::Definition(DefinitionId::new_invalid()),
 
            first_unique_id_in_scope: -1,
 
            next_unique_id_in_scope: -1,
 
            relative_pos_in_parent: 0,
 
            locals: Vec::new(),
 
            labels: Vec::new(),
 
        });
 

	
 
        let end_block = ctx.heap.alloc_end_block_statement(|this| EndBlockStatement{
 
            this, start_block: id, next: StatementId::new_invalid()
 
        });
 

	
 
        let block_stmt = &mut ctx.heap[id];
 
        block_stmt.end_block = end_block;
 

	
 
        Ok(id)
 
    }
 

	
 
    fn consume_if_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<IfStatementId, ParseError> {
 
        let if_span = consume_exact_ident(&module.source, iter, KW_STMT_IF)?;
 
        consume_token(&module.source, iter, TokenKind::OpenParen)?;
 
        let test = self.consume_expression(module, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::CloseParen)?;
 
        let true_body = self.consume_block_or_wrapped_statement(module, iter, ctx)?;
 

	
 
        let false_body = if has_ident(&module.source, iter, KW_STMT_ELSE) {
 
            iter.consume();
 
            let false_body = self.consume_block_or_wrapped_statement(module, iter, ctx)?;
 
            Some(false_body)
 
        } else {
 
            None
 
        };
 

	
 
        Ok(ctx.heap.alloc_if_statement(|this| IfStatement{
 
            this,
 
            span: if_span,
 
            test,
 
            true_body,
 
            false_body,
 
            end_if: EndIfStatementId::new_invalid(),
 
        }))
 
    }
 

	
 
    fn consume_while_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<WhileStatementId, ParseError> {
 
        let while_span = consume_exact_ident(&module.source, iter, KW_STMT_WHILE)?;
 
        consume_token(&module.source, iter, TokenKind::OpenParen)?;
 
        let test = self.consume_expression(module, iter, ctx)?;
 
        consume_token(&module.source, iter, TokenKind::CloseParen)?;
 
        let body = self.consume_block_or_wrapped_statement(module, iter, ctx)?;
 

	
 
        Ok(ctx.heap.alloc_while_statement(|this| WhileStatement{
 
            this,
 
            span: while_span,
 
            test,
 
            body,
 
            end_while: EndWhileStatementId::new_invalid(),
 
            in_sync: None,
 
        }))
 
    }
 

	
 
    fn consume_break_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<BreakStatementId, ParseError> {
 
        let break_span = consume_exact_ident(&module.source, iter, KW_STMT_BREAK)?;
 
        let label = if Some(TokenKind::Ident) == iter.next() {
 
            let label = consume_ident_interned(&module.source, iter, ctx)?;
 
            Some(label)
 
        } else {
 
            None
 
        };
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
        Ok(ctx.heap.alloc_break_statement(|this| BreakStatement{
 
            this,
 
            span: break_span,
 
            label,
 
            target: None,
 
        }))
 
    }
 

	
 
    fn consume_continue_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ContinueStatementId, ParseError> {
 
        let continue_span = consume_exact_ident(&module.source, iter, KW_STMT_CONTINUE)?;
 
        let label = if Some(TokenKind::Ident) == iter.next() {
 
            let label = consume_ident_interned(&module.source, iter, ctx)?;
 
            Some(label)
 
        } else {
 
            None
 
        };
 
        consume_token(&module.source, iter, TokenKind::SemiColon)?;
 
        Ok(ctx.heap.alloc_continue_statement(|this| ContinueStatement{
 
            this,
 
            span: continue_span,
 
            label,
 
            target: None
 
        }))
 
    }
 

	
 
    fn consume_synchronous_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<SynchronousStatementId, ParseError> {
 
        let synchronous_span = consume_exact_ident(&module.source, iter, KW_STMT_SYNC)?;
 
        let body = self.consume_block_or_wrapped_statement(module, iter, ctx)?;
 

	
 
        Ok(ctx.heap.alloc_synchronous_statement(|this| SynchronousStatement{
 
            this,
 
            span: synchronous_span,
 
            body,
 
            end_sync: EndSynchronousStatementId::new_invalid(),
 
            parent_scope: None,
 
        }))
 
    }
 

	
 
    fn consume_return_statement(
 
        &mut self, module: &Module, iter: &mut TokenIter, ctx: &mut PassCtx
 
    ) -> Result<ReturnStatementId, ParseError> {
 
        let return_span = consume_exact_ident(&module.source, iter, KW_STMT_RETURN)?;
 
        let mut scoped_section = self.expressions.start_section();
 

	
src/protocol/parser/pass_typing.rs
 
/// pass_typing
 
///
 
/// Performs type inference and type checking. Type inference is implemented by
 
/// applying constraints on (sub)trees of types. During this process the
 
/// resolver takes the `ParserType` structs (the representation of the types
 
/// written by the programmer), converts them to `InferenceType` structs (the
 
/// temporary data structure used during type inference) and attempts to arrive
 
/// at `ConcreteType` structs (the representation of a fully checked and
 
/// validated type).
 
///
 
/// The resolver will visit every statement and expression relevant to the
 
/// procedure and insert an initial type determined from its context (e.g. a
 
/// return statement's expression must match the function's return type, an
 
/// if statement's test expression must evaluate to a boolean). When all are
 
/// visited we attempt to make progress in evaluating the types. Whenever a type
 
/// is progressed we queue the related expressions for further type progression.
 
/// Once no more expressions are in the queue the algorithm is finished. At this
 
/// point either all types are inferred (or can be trivially implicitly
 
/// determined), or we have incomplete types. In the latter case we return an
 
/// error.
 
///
 
/// Inference may be applied on non-polymorphic procedures and on polymorphic
 
/// procedures. When dealing with a non-polymorphic procedure we apply the type
 
/// resolver and annotate the AST with the `ConcreteType`s. When dealing with
 
/// polymorphic procedures we will only annotate the AST once, preserving
 
/// references to polymorphic variables. Any later pass will perform just the
 
/// type checking.
 
///
 
/// TODO: Needs a thorough rewrite:
 
///  0. polymorph_progress is intentionally broken at the moment.
 
///  1. For polymorphic type inference we need to have an extra datastructure
 
///     for progressing the polymorphic variables and mapping them back to each
 
///     signature type that uses that polymorphic type. The two types of markers
 
///     became somewhat of a mess.
 
///  2. We're doing a lot of extra work. It seems better to apply the initial
 
///     type based on expression parents, then to apply forced constraints (arg
 
///     to a fires() call must be port-like), only then to start progressing the
 
///     types.
 
///     Furthermore, queueing of expressions can be more intelligent, currently
 
///     every child/parent of an expression is inferred again when queued. Hence
 
///     we need to queue only specific children/parents of expressions.
 
///  3. Remove the `msg` type?
 
///  4. Disallow certain types in certain operations (e.g. `Void`).
 
///  5. Implement implicit and explicit casting.
 
///  6. Investigate different ways of performing the type-on-type inference,
 
///     maybe there is a better way than flattened trees + markers?
 

	
 
macro_rules! debug_log_enabled {
 
    () => { false };
 
}
 

	
 
macro_rules! debug_log {
 
    ($format:literal) => {
 
        enabled_debug_print!(false, "types", $format);
 
    };
 
    ($format:literal, $($args:expr),*) => {
 
        enabled_debug_print!(false, "types", $format, $($args),*);
 
    };
 
}
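// Example invocation, mirroring the calls in the visitor implementations below:
//
//     debug_log!("Visiting component '{}': {}", comp_def.identifier.value.as_str(), id.0.index);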
 

	
 
use std::collections::{HashMap, HashSet};
 

	
 
use crate::protocol::ast::*;
 
use crate::protocol::input_source::ParseError;
 
use crate::protocol::parser::ModuleCompilationPhase;
 
use crate::protocol::parser::type_table::*;
 
use crate::protocol::parser::token_parsing::*;
 
use super::visitor::{
 
    STMT_BUFFER_INIT_CAPACITY,
 
    EXPR_BUFFER_INIT_CAPACITY,
 
    Ctx,
 
    Visitor2,
 
    VisitorResult
 
};
 
use std::collections::hash_map::Entry;
 

	
 
const VOID_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Void ];
 
const MESSAGE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Message, InferenceTypePart::UInt8 ];
 
const BOOL_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Bool ];
 
const CHARACTER_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::Character ];
 
const STRING_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::String ];
 
const NUMBERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::NumberLike ];
 
const INTEGERLIKE_TEMPLATE: [InferenceTypePart; 1] = [ InferenceTypePart::IntegerLike ];
 
const ARRAY_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::Array, InferenceTypePart::Unknown ];
 
const ARRAYLIKE_TEMPLATE: [InferenceTypePart; 2] = [ InferenceTypePart::ArrayLike, InferenceTypePart::Unknown ];
 

	
 
/// TODO: @performance Turn into PartialOrd+Ord to simplify checks
 
/// TODO: @types Remove the Message -> Byte hack at some point...
 
#[derive(Debug, Clone, Eq, PartialEq)]
 
pub(crate) enum InferenceTypePart {
 
    // A marker with an identifier which we can use to retrieve the type subtree
 
    // that follows the marker. This is used to perform type inference on
 
    // polymorphs: an expression may determine the polymorph's type, after which we
 
    // need to apply that information to all other places where the polymorph is
 
    // used.
 
    MarkerDefinition(usize), // marker for polymorph types on a procedure's definition
 
    MarkerBody(usize), // marker for polymorph types within a procedure body
 
    // Completely unknown type, needs to be inferred
 
    Unknown,
 
    // Partially known type, may be inferred to be the appropriate related
 
    // type.
 
    // IndexLike,      // index into array/slice
 
    NumberLike,     // any kind of integer/float
 
    IntegerLike,    // any kind of integer
 
    ArrayLike,      // array or slice. Note that this must have a subtype
 
    PortLike,       // input or output port
 
    // Special types that cannot be instantiated by the user
 
    Void, // For builtin functions that do not return anything
 
    // Concrete types without subtypes
 
    Bool,
 
    UInt8,
 
    UInt16,
 
    UInt32,
 
    UInt64,
 
    SInt8,
 
    SInt16,
 
    SInt32,
 
    SInt64,
 
    Character,
 
    String,
 
    // One subtype
 
    Message,
 
    Array,
 
    Slice,
 
    Input,
 
    Output,
 
    // A user-defined type with any number of subtypes
 
    Instance(DefinitionId, usize)
 
}
 

	
 
impl InferenceTypePart {
 
    fn is_marker(&self) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        match self {
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if the type is concrete, markers are interpreted as concrete
 
    /// types.
 
    fn is_concrete(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | 
 
            ITP::ArrayLike | ITP::PortLike => false,
 
            _ => true
 
        }
 
    }
 

	
 
    fn is_concrete_number(&self) -> bool {
 
        // TODO: @float
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_integer(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_msg_array_or_slice(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Array | ITP::Slice | ITP::Message => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    fn is_concrete_port(&self) -> bool {
 
        use InferenceTypePart as ITP;
 
        match self {
 
            ITP::Input | ITP::Output => true,
 
            _ => false,
 
        }
 
    }
 

	
 
    /// Checks if a part is less specific than the argument. Only checks for 
 
    /// single-part inference (i.e. not the replacement of an `Unknown` variant 
 
    /// with the argument)
 
    fn may_be_inferred_from(&self, arg: &InferenceTypePart) -> bool {
 
        use InferenceTypePart as ITP;
 

	
 
        (*self == ITP::IntegerLike && arg.is_concrete_integer()) ||
 
        (*self == ITP::NumberLike && (arg.is_concrete_number() || *arg == ITP::IntegerLike)) ||
 
        (*self == ITP::ArrayLike && arg.is_concrete_msg_array_or_slice()) ||
 
        (*self == ITP::PortLike && arg.is_concrete_port())
 
    }
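    // A few illustrative outcomes of `may_be_inferred_from`, derived from the
    // rules above (sketch, not verbatim source):
    //
    //     ITP::IntegerLike.may_be_inferred_from(&ITP::UInt32)     // true
    //     ITP::NumberLike.may_be_inferred_from(&ITP::IntegerLike) // true
    //     ITP::ArrayLike.may_be_inferred_from(&ITP::Slice)        // true
    //     ITP::UInt32.may_be_inferred_from(&ITP::IntegerLike)     // false
    //
    // Only the less specific side can be inferred from the more specific one.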
 

	
 
    /// Returns the change in "iteration depth" when traversing this particular
 
    /// part. The iteration depth is used to traverse the tree in a linear 
 
    /// fashion. It is basically `number_of_subtypes - 1`
 
    fn depth_change(&self) -> i32 {
 
        use InferenceTypePart as ITP;
 
        match &self {
 
            ITP::Unknown | ITP::NumberLike | ITP::IntegerLike |
 
            ITP::Void | ITP::Bool |
 
            ITP::UInt8 | ITP::UInt16 | ITP::UInt32 | ITP::UInt64 |
 
            ITP::SInt8 | ITP::SInt16 | ITP::SInt32 | ITP::SInt64 |
 
            ITP::Character | ITP::String => {
 
                -1
 
            },
 
            ITP::MarkerDefinition(_) | ITP::MarkerBody(_) |
 
            ITP::ArrayLike | ITP::Message | ITP::Array | ITP::Slice |
 
            ITP::PortLike | ITP::Input | ITP::Output => {
 
                // One subtype, so do not modify depth
 
                0
 
            },
 
            ITP::Instance(_, num_args) => {
 
                (*num_args as i32) - 1
 
            }
 
        }
 
    }
 
}
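// Illustrative sketch of the flattened, depth-first representation used above
// (not verbatim source): an array whose element type is a message of bytes is
// serialized as
//
//     [ InferenceTypePart::Array, InferenceTypePart::Message, InferenceTypePart::UInt8 ]
//
// with `depth_change` values 0, 0 and -1. Starting a traversal at depth 1, the
// running depth only reaches 0 after the final leaf, which is how
// `find_subtree_end_idx` (further below) detects where a subtree ends.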
 

	
 
impl From<ConcreteTypePart> for InferenceTypePart {
 
    fn from(v: ConcreteTypePart) -> InferenceTypePart {
 
        use ConcreteTypePart as CTP;
 
        use InferenceTypePart as ITP;
 

	
 
        match v {
 
            CTP::Marker(_) => {
 
                unreachable!("encountered marker while converting concrete type to inferred type");
 
            }
 
            CTP::Void => ITP::Void,
 
            CTP::Message => ITP::Message,
 
            CTP::Bool => ITP::Bool,
 
            CTP::UInt8 => ITP::UInt8,
 
            CTP::UInt16 => ITP::UInt16,
 
            CTP::UInt32 => ITP::UInt32,
 
            CTP::UInt64 => ITP::UInt64,
 
            CTP::SInt8 => ITP::SInt8,
 
            CTP::SInt16 => ITP::SInt16,
 
            CTP::SInt32 => ITP::SInt32,
 
            CTP::SInt64 => ITP::SInt64,
 
            CTP::Character => ITP::Character,
 
            CTP::String => ITP::String,
 
            CTP::Array => ITP::Array,
 
            CTP::Slice => ITP::Slice,
 
            CTP::Input => ITP::Input,
 
            CTP::Output => ITP::Output,
 
            CTP::Instance(id, num) => ITP::Instance(id, num),
 
        }
 
    }
 
}
 

	
 
#[derive(Debug)]
 
struct InferenceType {
 
    has_body_marker: bool,
 
    is_done: bool,
 
    parts: Vec<InferenceTypePart>,
 
}
 

	
 
impl InferenceType {
 
    /// Generates a new InferenceType. The two boolean flags will be checked in
 
    /// debug mode.
 
    fn new(has_body_marker: bool, is_done: bool, parts: Vec<InferenceTypePart>) -> Self {
 
        if cfg!(debug_assertions) {
 
            debug_assert!(!parts.is_empty());
 
            let parts_body_marker = parts.iter().any(|v| {
 
                if let InferenceTypePart::MarkerBody(_) = v { true } else { false }
 
            });
 
            debug_assert_eq!(has_body_marker, parts_body_marker);
 
            let parts_done = parts.iter().all(|v| v.is_concrete());
 
            debug_assert_eq!(is_done, parts_done, "{:?}", parts);
 
        }
 
        Self{ has_body_marker, is_done, parts }
 
    }
 

	
 
    /// Replaces a type subtree with the provided subtree. The caller must make
 
    /// sure the the replacement is a well formed type subtree.
 
    fn replace_subtree(&mut self, start_idx: usize, with: &[InferenceTypePart]) {
 
        let end_idx = Self::find_subtree_end_idx(&self.parts, start_idx);
 
        debug_assert_eq!(with.len(), Self::find_subtree_end_idx(with, 0));
 
        self.parts.splice(start_idx..end_idx, with.iter().cloned());
 
        self.recompute_is_done();
 
    }
 

	
 
    // TODO: @performance, might all be done inline in the type inference methods
 
    fn recompute_is_done(&mut self) {
 
        self.is_done = self.parts.iter().all(|v| v.is_concrete());
 
    }
 

	
 
    /// Seeks a body marker starting at the specified position. If a marker is
 
    /// found then its value and the index of the type subtree that follows it
 
    /// are returned.
 
    fn find_body_marker(&self, mut start_idx: usize) -> Option<(usize, usize)> {
 
        while start_idx < self.parts.len() {
 
            if let InferenceTypePart::MarkerBody(marker) = &self.parts[start_idx] {
 
                return Some((*marker, start_idx + 1))
 
            }
 

	
 
            start_idx += 1;
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Returns an iterator over all body markers and the partial type tree that
 
    /// follows those markers. If it is a problem that `InferenceType` is 
 
    /// borrowed by the iterator, then use `find_body_marker`.
 
    fn body_marker_iter(&self) -> InferenceTypeMarkerIter {
 
        InferenceTypeMarkerIter::new(&self.parts)
 
    }
 

	
 
    /// Given that the `parts` are a depth-first serialized tree of types, this
 
    /// function finds the subtree anchored at a specific node. The returned 
 
    /// index is exclusive.
 
    fn find_subtree_end_idx(parts: &[InferenceTypePart], start_idx: usize) -> usize {
 
        let mut depth = 1;
 
        let mut idx = start_idx;
 

	
 
        while idx < parts.len() {
 
            depth += parts[idx].depth_change();
 
            if depth == 0 {
 
                return idx + 1;
 
            }
 
            idx += 1;
 
        }
 

	
 
        // If here, then the inference type is malformed
 
        unreachable!();
 
    }
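    // Worked example (sketch): for
    //
    //     parts = [ ITP::Array, ITP::Instance(id, 2), ITP::UInt8, ITP::Bool ]
    //
    // the subtree rooted at index 0 spans the whole slice, so
    // `find_subtree_end_idx(parts, 0) == 4`; the `Instance` subtree also ends at
    // the last part, so `find_subtree_end_idx(parts, 1) == 4`; and the `UInt8`
    // leaf gives `find_subtree_end_idx(parts, 2) == 3`.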
 

	
 
    /// Call that attempts to infer the part at `to_infer.parts[to_infer_idx]` 
 
    /// using the subtree at `template.parts[template_idx]`. Will return 
 
    /// `Some(depth_change_due_to_traversal)` if type inference has been 
 
    /// applied. In this case the indices will also be modified to point to the 
 
    /// next part in both templates. If type inference has not (or: could not) 
 
    /// be applied then `None` will be returned. Note that this might mean that 
 
    /// the types are incompatible.
 
    ///
 
    /// As this is a helper function, some assumptions: the parts are not
 
    /// exactly equal, and neither of them contains a marker. Also: only the
 
    /// `to_infer` parts are checked for inference. It might be that this 
 
    /// function returns `None`, but that `template` is still compatible
 
    /// with `to_infer`, e.g. when `template` has an `Unknown` part.
 
@@ -423,1558 +426,1575 @@ impl InferenceType {
 

	
 
        let to_check_part = &to_check_parts[*to_check_idx];
 
        let template_part = &template_parts[*template_idx];
 

	
 
        // Checking programmer errors
 
        debug_assert_ne!(to_check_part, template_part);
 
        debug_assert!(!to_check_part.is_marker(), "marker encountered in 'to_check part'");
 
        debug_assert!(!template_part.is_marker(), "marker encountered in 'template part'");
 

	
 
        if to_check_part.may_be_inferred_from(template_part) {
 
            let depth_change = to_check_part.depth_change();
 
            debug_assert_eq!(depth_change, template_part.depth_change());
 
            *to_check_idx += 1;
 
            *template_idx += 1;
 
            return Some(depth_change);
 
        }
 

	
 
        if *to_check_part == ITP::Unknown {
 
            *to_check_idx += 1;
 
            *template_idx = Self::find_subtree_end_idx(template_parts, *template_idx);
 

	
 
            // By definition LHS and RHS had depth change of -1
 
            return Some(-1);
 
        }
 

	
 
        None
 
    }
 

	
 
    /// Attempts to infer types between two `InferenceType` instances. This 
 
    /// function is unsafe as it accepts pointers to work around Rust's 
 
    /// borrowing rules. The caller must ensure that the pointers are distinct.
 
    unsafe fn infer_subtrees_for_both_types(
 
        type_a: *mut InferenceType, start_idx_a: usize,
 
        type_b: *mut InferenceType, start_idx_b: usize
 
    ) -> DualInferenceResult {
 
        debug_assert!(!std::ptr::eq(type_a, type_b), "encountered pointers to the same inference type");
 
        let type_a = &mut *type_a;
 
        let type_b = &mut *type_b;
 

	
 
        let mut modified_a = false;
 
        let mut modified_b = false;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            // Advance indices if we encounter markers or equal parts
 
            let part_a = &type_a.parts[idx_a];
 
            let part_b = &type_b.parts[idx_b];
 
            
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            // Types are not equal and are both not markers
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_a, &mut idx_a, &type_b.parts, &mut idx_b) {
 
                depth += depth_change;
 
                modified_a = true;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::infer_part_for_single_type(type_b, &mut idx_b, &type_a.parts, &mut idx_a) {
 
                depth += depth_change;
 
                modified_b = true;
 
                continue;
 
            }
 

	
 
            // And can also not be inferred in any way: types must be incompatible
 
            return DualInferenceResult::Incompatible;
 
        }
 

	
 
        if modified_a { type_a.recompute_is_done(); }
 
        if modified_b { type_b.recompute_is_done(); }
 

	
 
        // If here then we completely inferred the subtrees.
 
        match (modified_a, modified_b) {
 
            (false, false) => DualInferenceResult::Neither,
 
            (false, true) => DualInferenceResult::Second,
 
            (true, false) => DualInferenceResult::First,
 
            (true, true) => DualInferenceResult::Both
 
        }
 
    }
 

	
 
    /// Attempts to infer the first subtree based on the template. Like
 
    /// `infer_subtrees_for_both_types`, but now only applying inference to
 
    /// `to_infer` based on the type information in `template`.
 
    /// Secondary use is to make sure that a type follows a certain template.
 
    fn infer_subtree_for_single_type(
 
        to_infer: &mut InferenceType, mut to_infer_idx: usize,
 
        template: &[InferenceTypePart], mut template_idx: usize,
 
    ) -> SingleInferenceResult {
 
        let mut modified = false;
 
        let mut depth = 1;
 

	
 
        while depth > 0 {
 
            let to_infer_part = &to_infer.parts[to_infer_idx];
 
            let template_part = &template[template_idx];
 

	
 
            if to_infer_part == template_part {
 
                let depth_change = to_infer_part.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, template_part.depth_change());
 
                to_infer_idx += 1;
 
                template_idx += 1;
 
                continue;
 
            }
 
            if to_infer_part.is_marker() { to_infer_idx += 1; continue; }
 
            if template_part.is_marker() { template_idx += 1; continue; }
 

	
 
            // Types are not equal and not markers. So check if we can infer 
 
            // anything
 
            if let Some(depth_change) = Self::infer_part_for_single_type(
 
                to_infer, &mut to_infer_idx, template, &mut template_idx
 
            ) {
 
                depth += depth_change;
 
                modified = true;
 
                continue;
 
            }
 

	
 
            // We cannot infer anything, but the template may still be 
 
            // compatible with the type we're inferring
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                template, &mut template_idx, &to_infer.parts, &mut to_infer_idx
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return SingleInferenceResult::Incompatible
 
        }
 

	
 
        return if modified {
 
            to_infer.recompute_is_done();
 
            SingleInferenceResult::Modified
 
        } else {
 
            SingleInferenceResult::Unmodified
 
        }
 
    }
 

	
 
    /// Checks if both types are compatible, doesn't perform any inference
 
    fn check_subtrees(
 
        type_parts_a: &[InferenceTypePart], start_idx_a: usize,
 
        type_parts_b: &[InferenceTypePart], start_idx_b: usize
 
    ) -> bool {
 
        let mut depth = 1;
 
        let mut idx_a = start_idx_a;
 
        let mut idx_b = start_idx_b;
 

	
 
        while depth > 0 {
 
            let part_a = &type_parts_a[idx_a];
 
            let part_b = &type_parts_b[idx_b];
 

	
 
            if part_a == part_b {
 
                let depth_change = part_a.depth_change();
 
                depth += depth_change;
 
                debug_assert_eq!(depth_change, part_b.depth_change());
 
                idx_a += 1;
 
                idx_b += 1;
 
                continue;
 
            }
 
            
 
            if part_a.is_marker() { idx_a += 1; continue; }
 
            if part_b.is_marker() { idx_b += 1; continue; }
 

	
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_a, &mut idx_a, type_parts_b, &mut idx_b
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 
            if let Some(depth_change) = Self::check_part_for_single_type(
 
                type_parts_b, &mut idx_b, type_parts_a, &mut idx_a
 
            ) {
 
                depth += depth_change;
 
                continue;
 
            }
 

	
 
            return false;
 
        }
 

	
 
        true
 
    }
 

	
 
    /// Performs the conversion of the inference type into a concrete type.
 
    /// By calling this function you must make sure that no unspecified types
 
    /// (e.g. Unknown or IntegerLike) exist in the type.
 
    fn write_concrete_type(&self, concrete_type: &mut ConcreteType, discard_marked_types: bool) {
 
        use InferenceTypePart as ITP;
 
        use ConcreteTypePart as CTP;
 

	
 
        // Make sure inference type is specified but concrete type is not yet specified
 
        debug_assert!(!self.parts.is_empty());
 
        debug_assert!(concrete_type.parts.is_empty());
 
        concrete_type.parts.reserve(self.parts.len());
 

	
 
        let mut idx = 0;
 
        while idx < self.parts.len() {
 
            let part = &self.parts[idx];
 
            let converted_part = match part {
 
                ITP::MarkerDefinition(marker) => {
 
                    // A definition marker either becomes a regular marker in the
 
                    // concrete type (discarding the type subtree that follows it)
 
                    // or is dropped in favour of that subtree: when annotating
 
                    // the AST we keep the markers, when determining the types
 
                    // for monomorphs we keep the type instead.
 
                    if discard_marked_types {
 
                        idx = InferenceType::find_subtree_end_idx(&self.parts, idx + 1);
 
                        concrete_type.parts.push(CTP::Marker(*marker));
 
                    } else {
 
                        idx += 1;
 
                    }
 
                    continue;
 
                },
 
                ITP::MarkerBody(_) => {
 
                    // Inner markers are removed when writing to the concrete
 
                    // type.
 
                    idx += 1;
 
                    continue;
 
                },
 
                ITP::Unknown | ITP::NumberLike | ITP::IntegerLike | ITP::ArrayLike | ITP::PortLike => {
 
                    unreachable!("Attempted to convert inference type part {:?} into concrete type", part);
 
                },
 
                ITP::Void => CTP::Void,
 
                ITP::Message => CTP::Message,
 
                ITP::Bool => CTP::Bool,
 
                ITP::UInt8 => CTP::UInt8,
 
                ITP::UInt16 => CTP::UInt16,
 
                ITP::UInt32 => CTP::UInt32,
 
                ITP::UInt64 => CTP::UInt64,
 
                ITP::SInt8 => CTP::SInt8,
 
                ITP::SInt16 => CTP::SInt16,
 
                ITP::SInt32 => CTP::SInt32,
 
                ITP::SInt64 => CTP::SInt64,
 
                ITP::Character => CTP::Character,
 
                ITP::String => CTP::String,
 
                ITP::Array => CTP::Array,
 
                ITP::Slice => CTP::Slice,
 
                ITP::Input => CTP::Input,
 
                ITP::Output => CTP::Output,
 
                ITP::Instance(id, num) => CTP::Instance(*id, *num),
 
            };
 

	
 
            concrete_type.parts.push(converted_part);
 
            idx += 1;
 
        }
 
    }
 

	
 
    /// Writes a human-readable version of the type to a string. This is used
 
    /// to display error messages
 
    fn write_display_name(
 
        buffer: &mut String, heap: &Heap, parts: &[InferenceTypePart], mut idx: usize
 
    ) -> usize {
 
        use InferenceTypePart as ITP;
 

	
 
        match &parts[idx] {
 
            ITP::MarkerDefinition(thing) => {
 
                buffer.push_str(&format!("{{D:{}}}", *thing));
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
            }, 
 
            ITP::MarkerBody(thing) => {
 
                buffer.push_str(&format!("{{B:{}}}", *thing));
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
            },
 
            ITP::Unknown => buffer.push_str("?"),
 
            ITP::NumberLike => buffer.push_str("numberlike"),
 
            ITP::IntegerLike => buffer.push_str("integerlike"),
 
            ITP::ArrayLike => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[?]");
 
            },
 
            ITP::PortLike => {
 
                buffer.push_str("portlike<");
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            }
 
            ITP::Void => buffer.push_str("void"),
 
            ITP::Bool => buffer.push_str(KW_TYPE_BOOL_STR),
 
            ITP::UInt8 => buffer.push_str(KW_TYPE_UINT8_STR),
 
            ITP::UInt16 => buffer.push_str(KW_TYPE_UINT16_STR),
 
            ITP::UInt32 => buffer.push_str(KW_TYPE_UINT32_STR),
 
            ITP::UInt64 => buffer.push_str(KW_TYPE_UINT64_STR),
 
            ITP::SInt8 => buffer.push_str(KW_TYPE_SINT8_STR),
 
            ITP::SInt16 => buffer.push_str(KW_TYPE_SINT16_STR),
 
            ITP::SInt32 => buffer.push_str(KW_TYPE_SINT32_STR),
 
            ITP::SInt64 => buffer.push_str(KW_TYPE_SINT64_STR),
 
            ITP::Character => buffer.push_str(KW_TYPE_CHAR_STR),
 
            ITP::String => buffer.push_str(KW_TYPE_STRING_STR),
 
            ITP::Message => {
 
                buffer.push_str(KW_TYPE_MESSAGE_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Array => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[]");
 
            },
 
            ITP::Slice => {
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push_str("[..]");
 
            },
 
            ITP::Input => {
 
                buffer.push_str(KW_TYPE_IN_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Output => {
 
                buffer.push_str(KW_TYPE_OUT_PORT_STR);
 
                buffer.push('<');
 
                idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                buffer.push('>');
 
            },
 
            ITP::Instance(definition_id, num_sub) => {
 
                let definition = &heap[*definition_id];
 
                buffer.push_str(definition.identifier().value.as_str());
 
                if *num_sub > 0 {
 
                    buffer.push('<');
 
                    idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    for _sub_idx in 1..*num_sub {
 
                        buffer.push_str(", ");
 
                        idx = Self::write_display_name(buffer, heap, parts, idx + 1);
 
                    }
 
                    buffer.push('>');
 
                }
 
            },
 
        }
 

	
 
        idx
 
    }
 

	
 
    /// Returns the display name of a (part of) the type tree. Will allocate a
 
    /// string.
 
    fn partial_display_name(heap: &Heap, parts: &[InferenceTypePart]) -> String {
 
        let mut buffer = String::with_capacity(parts.len() * 6);
 
        Self::write_display_name(&mut buffer, heap, parts, 0);
 
        buffer
 
    }
 

	
 
    /// Returns the display name of the full type tree. Will allocate a string.
 
    fn display_name(&self, heap: &Heap) -> String {
 
        Self::partial_display_name(heap, &self.parts)
 
    }
 
}
 

	
 
/// Iterator over the subtrees that follow a marker in an `InferenceType`
 
/// instance. Returns immutable slices over the internal parts
 
struct InferenceTypeMarkerIter<'a> {
 
    parts: &'a [InferenceTypePart],
 
    idx: usize,
 
}
 

	
 
impl<'a> InferenceTypeMarkerIter<'a> {
 
    fn new(parts: &'a [InferenceTypePart]) -> Self {
 
        Self{ parts, idx: 0 }
 
    }
 
}
 

	
 
impl<'a> Iterator for InferenceTypeMarkerIter<'a> {
 
    type Item = (usize, &'a [InferenceTypePart]);
 

	
 
    fn next(&mut self) -> Option<Self::Item> {
 
        // Iterate until we find a marker
 
        while self.idx < self.parts.len() {
 
            if let InferenceTypePart::MarkerBody(marker) = self.parts[self.idx] {
 
                // Found a marker, find the subtree end
 
                let start_idx = self.idx + 1;
 
                let end_idx = InferenceType::find_subtree_end_idx(self.parts, start_idx);
 

	
 
                // Modify internal index, then return items
 
                self.idx = end_idx;
 
                return Some((marker, &self.parts[start_idx..end_idx]));
 
            }
 

	
 
            self.idx += 1;
 
        }
 

	
 
        None
 
    }
 
}
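// Illustrative iteration (sketch, not verbatim source): for the parts
//
//     [ MarkerBody(0), Array, UInt8, MarkerBody(1), Bool ]
//
// the iterator yields `(0, &[Array, UInt8])` followed by `(1, &[Bool])`: each
// body marker is paired with the type subtree serialized directly after it.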
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum DualInferenceResult {
 
    Neither,        // neither argument is clarified
 
    First,          // first argument is clarified using the second one
 
    Second,         // second argument is clarified using the first one
 
    Both,           // both arguments are clarified
 
    Incompatible,   // types are incompatible: programmer error
 
}
 

	
 
impl DualInferenceResult {
 
    fn modified_lhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::First | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
    fn modified_rhs(&self) -> bool {
 
        match self {
 
            DualInferenceResult::Second | DualInferenceResult::Both => true,
 
            _ => false
 
        }
 
    }
 
}
 

	
 
#[derive(Debug, PartialEq, Eq)]
 
enum SingleInferenceResult {
 
    Unmodified,
 
    Modified,
 
    Incompatible
 
}
 

	
 
enum DefinitionType{
 
    Component(ComponentDefinitionId),
 
    Function(FunctionDefinitionId),
 
}
 

	
 
impl DefinitionType {
 
    fn definition_id(&self) -> DefinitionId {
 
        match self {
 
            DefinitionType::Component(v) => v.upcast(),
 
            DefinitionType::Function(v) => v.upcast(),
 
        }
 
    }
 
}
 

	
 
#[derive(PartialEq, Eq)]
 
pub(crate) struct ResolveQueueElement {
 
    pub(crate) root_id: RootId,
 
    pub(crate) definition_id: DefinitionId,
 
    pub(crate) monomorph_types: Vec<ConcreteType>,
 
}
 

	
 
pub(crate) type ResolveQueue = Vec<ResolveQueueElement>;
 

	
 
/// This particular visitor will recurse depth-first into the AST and ensures
 
/// that all expressions have the appropriate types.
 
pub(crate) struct PassTyping {
 
    // Current definition we're typechecking.
 
    definition_type: DefinitionType,
 
    poly_vars: Vec<ConcreteType>,
 

	
 
    // Buffers for iteration over substatements and subexpressions
 
    stmt_buffer: Vec<StatementId>,
 
    expr_buffer: Vec<ExpressionId>,
 

	
 
    // Mapping from parser type to inferred type. We attempt to continue to
 
    // specify these types until we're stuck or we've fully determined the type.
 
    var_types: HashMap<VariableId, VarData>,      // types of variables
 
    expr_types: HashMap<ExpressionId, InferenceType>,   // types of expressions
 
    extra_data: HashMap<ExpressionId, ExtraData>,       // data for polymorph inference
 
    // Keeping track of which expressions need to be reinferred because the
 
    // expressions they're linked to made progression on an associated type
 
    expr_queued: HashSet<ExpressionId>,
 
}
 

	
 
// TODO: @rename used for calls and struct literals, maybe union literals?
 
// TODO: @Rename, this is used for a lot of type inferencing. It seems like
 
//  there is a different underlying architecture waiting to surface.
 
struct ExtraData {
 
    /// Progression of polymorphic variables (if any)
 
    poly_vars: Vec<InferenceType>,
 
    /// Progression of types of call arguments or struct members
 
    embedded: Vec<InferenceType>,
 
    returned: InferenceType,
 
}
 

	
 
struct VarData {
 
    /// Type of the variable
 
    var_type: InferenceType,
 
    /// VariableExpressions that use the variable
 
    used_at: Vec<ExpressionId>,
 
    /// For channel statements we link to the other variable such that when one
 
    /// channel's interior type is resolved, we can also resolve the other one.
 
    linked_var: Option<VariableId>,
 
}
 

	
 
impl VarData {
 
    fn new_channel(var_type: InferenceType, other_port: VariableId) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: Some(other_port) }
 
    }
 
    fn new_local(var_type: InferenceType) -> Self {
 
        Self{ var_type, used_at: Vec::new(), linked_var: None }
 
    }
 
}
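// Illustrative sketch (see `visit_local_channel_stmt` further below, variable
// names abbreviated): a channel statement inserts two linked entries, one per
// endpoint, so progressing the interior type of one port can also progress the
// other:
//
//     var_types.insert(from_id, VarData::new_channel(from_type, to_id));
//     var_types.insert(to_id,   VarData::new_channel(to_type,   from_id));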
 

	
 
impl PassTyping {
 
    pub(crate) fn new() -> Self {
 
        PassTyping {
 
            definition_type: DefinitionType::Function(FunctionDefinitionId::new_invalid()),
 
            poly_vars: Vec::new(),
 
            stmt_buffer: Vec::with_capacity(STMT_BUFFER_INIT_CAPACITY),
 
            expr_buffer: Vec::with_capacity(EXPR_BUFFER_INIT_CAPACITY),
 
            var_types: HashMap::new(),
 
            expr_types: HashMap::new(),
 
            extra_data: HashMap::new(),
 
            expr_queued: HashSet::new(),
 
        }
 
    }
 

	
 
    // TODO: @cleanup Unsure about this, maybe a pattern will arise after
 
    //  a while.
 
    pub(crate) fn queue_module_definitions(ctx: &Ctx, queue: &mut ResolveQueue) {
 
        debug_assert_eq!(ctx.module.phase, ModuleCompilationPhase::ValidatedAndLinked);
 
        let root_id = ctx.module.root_id;
 
        let root = &ctx.heap.protocol_descriptions[root_id];
 
        for definition_id in &root.definitions {
 
            let definition = &ctx.heap[*definition_id];
 
            match definition {
 
                Definition::Function(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Component(definition) => {
 
                    if definition.poly_vars.is_empty() {
 
                        queue.push(ResolveQueueElement{
 
                            root_id,
 
                            definition_id: *definition_id,
 
                            monomorph_types: Vec::new(),
 
                        })
 
                    }
 
                },
 
                Definition::Enum(_) | Definition::Struct(_) | Definition::Union(_) => {},
 
            }
 
        }
 
    }
 

	
 
    pub(crate) fn handle_module_definition(
 
        &mut self, ctx: &mut Ctx, queue: &mut ResolveQueue, element: ResolveQueueElement
 
    ) -> VisitorResult {
 
        // Visit the definition
 
        debug_assert_eq!(ctx.module.root_id, element.root_id);
 
        self.reset();
 
        self.poly_vars.clear();
 
        self.poly_vars.extend(element.monomorph_types.iter().cloned());
 
        self.visit_definition(ctx, element.definition_id)?;
 

	
 
        // Keep resolving types
 
        self.resolve_types(ctx, queue)?;
 
        Ok(())
 
    }
 

	
 
    fn reset(&mut self) {
 
        self.definition_type = DefinitionType::Function(FunctionDefinitionId::new_invalid());
 
        self.poly_vars.clear();
 
        self.stmt_buffer.clear();
 
        self.expr_buffer.clear();
 
        self.var_types.clear();
 
        self.expr_types.clear();
 
        self.extra_data.clear();
 
        self.expr_queued.clear();
 
    }
 
}
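// Minimal sketch of a driver loop (assumed usage, not part of this file), where
// `pass` is a `PassTyping` and `ctx` the module's `Ctx`: the queue is seeded per
// module and then drained, with `handle_module_definition` free to push new
// monomorph requests while it runs.
//
//     let mut queue = ResolveQueue::new();
//     PassTyping::queue_module_definitions(&ctx, &mut queue);
//     while let Some(element) = queue.pop() {
//         pass.handle_module_definition(&mut ctx, &mut queue, element)?;
//     }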
 

	
 
impl Visitor2 for PassTyping {
 
    // Definitions
 

	
 
    fn visit_component_definition(&mut self, ctx: &mut Ctx, id: ComponentDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Component(id);
 

	
 
        let comp_def = &ctx.heap[id];
 
        debug_assert_eq!(comp_def.poly_vars.len(), self.poly_vars.len(), "component polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting component '{}': {}", comp_def.identifier.value.as_str(), id.0.index);
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in comp_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected component arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    fn visit_function_definition(&mut self, ctx: &mut Ctx, id: FunctionDefinitionId) -> VisitorResult {
 
        self.definition_type = DefinitionType::Function(id);
 

	
 
        let func_def = &ctx.heap[id];
 
        debug_assert_eq!(func_def.poly_vars.len(), self.poly_vars.len(), "function polyvars do not match imposed polyvars");
 

	
 
        debug_log!("{}", "-".repeat(50));
 
        debug_log!("Visiting function '{}': {}", func_def.identifier.value.as_str(), id.0.index);
 
        if debug_log_enabled!() {
 
            debug_log!("Polymorphic variables:");
 
            for (idx, poly_var) in self.poly_vars.iter().enumerate() {
 
                let mut infer_type_parts = Vec::new();
 
                for concrete_part in &poly_var.parts {
 
                    infer_type_parts.push(InferenceTypePart::from(*concrete_part));
 
                }
 
                let infer_type = InferenceType::new(false, true, infer_type_parts);
 
                debug_log!(" - [{:03}] {:?}", idx, infer_type.display_name(&ctx.heap));
 
            }
 
        }
 
        debug_log!("{}", "-".repeat(50));
 

	
 
        for param_id in func_def.parameters.clone() {
 
            let param = &ctx.heap[param_id];
 
            let var_type = self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, true);
 
            debug_assert!(var_type.is_done, "expected function arguments to be concrete types");
 
            self.var_types.insert(param_id, VarData::new_local(var_type));
 
        }
 

	
 
        let body_stmt_id = ctx.heap[id].body;
 
        self.visit_block_stmt(ctx, body_stmt_id)
 
    }
 

	
 
    // Statements
 

	
 
    fn visit_block_stmt(&mut self, ctx: &mut Ctx, id: BlockStatementId) -> VisitorResult {
 
        // Transfer statements for traversal
 
        let block = &ctx.heap[id];
 

	
 
        for stmt_id in block.statements.clone() {
 
            self.visit_stmt(ctx, stmt_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_memory_stmt(&mut self, ctx: &mut Ctx, id: MemoryStatementId) -> VisitorResult {
 
        let memory_stmt = &ctx.heap[id];
 

	
 
        let local = &ctx.heap[memory_stmt.variable];
 
        let var_type = self.determine_inference_type_from_parser_type_elements(&local.parser_type.elements, true);
 
        self.var_types.insert(memory_stmt.variable, VarData::new_local(var_type));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_local_channel_stmt(&mut self, ctx: &mut Ctx, id: ChannelStatementId) -> VisitorResult {
 
        let channel_stmt = &ctx.heap[id];
 

	
 
        let from_local = &ctx.heap[channel_stmt.from];
 
        let from_var_type = self.determine_inference_type_from_parser_type_elements(&from_local.parser_type.elements, true);
 
        self.var_types.insert(from_local.this, VarData::new_channel(from_var_type, channel_stmt.to));
 

	
 
        let to_local = &ctx.heap[channel_stmt.to];
 
        let to_var_type = self.determine_inference_type_from_parser_type_elements(&to_local.parser_type.elements, true);
 
        self.var_types.insert(to_local.this, VarData::new_channel(to_var_type, channel_stmt.from));
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_labeled_stmt(&mut self, ctx: &mut Ctx, id: LabeledStatementId) -> VisitorResult {
 
        let labeled_stmt = &ctx.heap[id];
 
        let substmt_id = labeled_stmt.body;
 
        self.visit_stmt(ctx, substmt_id)
 
    }
 

	
 
    fn visit_if_stmt(&mut self, ctx: &mut Ctx, id: IfStatementId) -> VisitorResult {
 
        let if_stmt = &ctx.heap[id];
 

	
 
        let true_body_id = if_stmt.true_body;
 
        let false_body_id = if_stmt.false_body;
 
        let test_expr_id = if_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_block_stmt(ctx, true_body_id)?;
 
        if let Some(false_body_id) = false_body_id {
 
            self.visit_block_stmt(ctx, false_body_id)?;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_while_stmt(&mut self, ctx: &mut Ctx, id: WhileStatementId) -> VisitorResult {
 
        let while_stmt = &ctx.heap[id];
 

	
 
        let body_id = while_stmt.body;
 
        let test_expr_id = while_stmt.test;
 

	
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_block_stmt(ctx, body_id)?;
 

	
 
        Ok(())
 
    }
 

	
 
    fn visit_synchronous_stmt(&mut self, ctx: &mut Ctx, id: SynchronousStatementId) -> VisitorResult {
 
        let sync_stmt = &ctx.heap[id];
 
        let body_id = sync_stmt.body;
 

	
 
        self.visit_block_stmt(ctx, body_id)
 
    }
 

	
 
    fn visit_return_stmt(&mut self, ctx: &mut Ctx, id: ReturnStatementId) -> VisitorResult {
 
        let return_stmt = &ctx.heap[id];
 
        debug_assert_eq!(return_stmt.expressions.len(), 1);
 
        let expr_id = return_stmt.expressions[0];
 

	
 
        self.visit_expr(ctx, expr_id)
 
    }
 

	
 
    fn visit_new_stmt(&mut self, ctx: &mut Ctx, id: NewStatementId) -> VisitorResult {
 
        let new_stmt = &ctx.heap[id];
 
        let call_expr_id = new_stmt.expression;
 

	
 
        self.visit_call_expr(ctx, call_expr_id)
 
    }
 

	
 
    fn visit_expr_stmt(&mut self, ctx: &mut Ctx, id: ExpressionStatementId) -> VisitorResult {
 
        let expr_stmt = &ctx.heap[id];
 
        let subexpr_id = expr_stmt.expression;
 

	
 
        self.visit_expr(ctx, subexpr_id)
 
    }
 

	
 
    // Expressions
 

	
 
    fn visit_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let assign_expr = &ctx.heap[id];
 
        let left_expr_id = assign_expr.left;
 
        let right_expr_id = assign_expr.right;
 

	
 
        self.visit_expr(ctx, left_expr_id)?;
 
        self.visit_expr(ctx, right_expr_id)?;
 

	
 
        self.progress_assignment_expr(ctx, id)
 
    }
 

	
 
    fn visit_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let conditional_expr = &ctx.heap[id];
 
        let test_expr_id = conditional_expr.test;
 
        let true_expr_id = conditional_expr.true_expression;
 
        let false_expr_id = conditional_expr.false_expression;
 

	
 
        self.expr_types.insert(test_expr_id, InferenceType::new(false, true, vec![InferenceTypePart::Bool]));
 
        self.visit_expr(ctx, test_expr_id)?;
 
        self.visit_expr(ctx, true_expr_id)?;
 
        self.visit_expr(ctx, false_expr_id)?;
 

	
 
        self.progress_conditional_expr(ctx, id)
 
    }
 

	
 
    fn visit_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let binary_expr = &ctx.heap[id];
 
        let lhs_expr_id = binary_expr.left;
 
        let rhs_expr_id = binary_expr.right;
 

	
 
        self.visit_expr(ctx, lhs_expr_id)?;
 
        self.visit_expr(ctx, rhs_expr_id)?;
 

	
 
        self.progress_binary_expr(ctx, id)
 
    }
 

	
 
    fn visit_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let unary_expr = &ctx.heap[id];
 
        let arg_expr_id = unary_expr.expression;
 

	
 
        self.visit_expr(ctx, arg_expr_id)?;
 

	
 
        self.progress_unary_expr(ctx, id)
 
    }
 

	
 
    fn visit_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let indexing_expr = &ctx.heap[id];
 
        let subject_expr_id = indexing_expr.subject;
 
        let index_expr_id = indexing_expr.index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, index_expr_id)?;
 

	
 
        self.progress_indexing_expr(ctx, id)
 
    }
 

	
 
    fn visit_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let slicing_expr = &ctx.heap[id];
 
        let subject_expr_id = slicing_expr.subject;
 
        let from_expr_id = slicing_expr.from_index;
 
        let to_expr_id = slicing_expr.to_index;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 
        self.visit_expr(ctx, from_expr_id)?;
 
        self.visit_expr(ctx, to_expr_id)?;
 

	
 
        self.progress_slicing_expr(ctx, id)
 
    }
 

	
 
    fn visit_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> VisitorResult {
 
        // TODO: @Monomorph, this is a temporary hack, see other comments
 
        let expr = &mut ctx.heap[id];
 
        if let Field::Symbolic(field) = &mut expr.field {
 
            field.definition = None;
 
            field.field_idx = 0;
 
        }
 

	
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let select_expr = &ctx.heap[id];
 
        let subject_expr_id = select_expr.subject;
 

	
 
        self.visit_expr(ctx, subject_expr_id)?;
 

	
 
        self.progress_select_expr(ctx, id)
 
    }
 

	
 
    fn visit_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let literal_expr = &ctx.heap[id];
 
        match &literal_expr.value {
 
            Literal::Null | Literal::False | Literal::True |
 
            Literal::Integer(_) | Literal::Character(_) | Literal::String(_) => {
 
                // No subexpressions
 
            },
 
            Literal::Struct(literal) => {
 
                // TODO: @performance
 
                let expr_ids: Vec<_> = literal.fields
 
                    .iter()
 
                    .map(|f| f.value)
 
                    .collect();
 

	
 
                self.insert_initial_struct_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            },
 
            Literal::Enum(_) => {
 
                // Enumerations do not carry any subexpressions, but they may
                // still have a user-defined polymorphic variable, so inference
                // may still need to be applied to that variable.
 
                self.insert_initial_enum_polymorph_data(ctx, id);
 
            },
 
            Literal::Union(literal) => {
 
                // May carry subexpressions and polymorphic arguments
 
                // TODO: @performance
 
                let expr_ids = literal.values.clone();
 
                self.insert_initial_union_polymorph_data(ctx, id);
 

	
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            },
 
            Literal::Array(expressions) => {
 
                // TODO: @performance
 
                let expr_ids = expressions.clone();
 
                for expr_id in expr_ids {
 
                    self.visit_expr(ctx, expr_id)?;
 
                }
 
            }
 
        }
 

	
 
        self.progress_literal_expr(ctx, id)
 
    }
 

	
 
    fn visit_call_expr(&mut self, ctx: &mut Ctx, id: CallExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 
        self.insert_initial_call_polymorph_data(ctx, id);
 

	
 
        // TODO: @performance
 
        let call_expr = &ctx.heap[id];
 
        for arg_expr_id in call_expr.arguments.clone() {
 
            self.visit_expr(ctx, arg_expr_id)?;
 
        }
 

	
 
        self.progress_call_expr(ctx, id)
 
    }
 

	
 
    fn visit_variable_expr(&mut self, ctx: &mut Ctx, id: VariableExpressionId) -> VisitorResult {
 
        let upcast_id = id.upcast();
 
        self.insert_initial_expr_inference_type(ctx, upcast_id)?;
 

	
 
        let var_expr = &ctx.heap[id];
 
        debug_assert!(var_expr.declaration.is_some());
 
        let var_data = self.var_types.get_mut(var_expr.declaration.as_ref().unwrap()).unwrap();
 
        var_data.used_at.push(upcast_id);
 

	
 
        self.progress_variable_expr(ctx, id)
 
    }
 
}
 

	
 
macro_rules! debug_assert_expr_ids_unique_and_known {
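    // Debug-only check that every expression ID passed to a constraint helper
    // has an entry in `expr_types`, and that all IDs are pairwise distinct.
    // The distinctness is what makes the raw-pointer accesses in those
    // helpers sound.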
 
    // Base case for a single expression ID
 
    ($resolver:ident, $id:ident) => {
 
        if cfg!(debug_assertions) {
 
            debug_assert!($resolver.expr_types.contains_key(&$id));
 
        }
 
    };
 
    // Base case for two expression IDs
 
    ($resolver:ident, $id1:ident, $id2:ident) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2);
 
    };
 
    // Generic case
 
    ($resolver:ident, $id1:ident, $id2:ident, $($tail:ident),+) => {
 
        debug_assert_ne!($id1, $id2);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id1);
 
        debug_assert_expr_ids_unique_and_known!($resolver, $id2, $($tail),+);
 
    };
 
}
 

	
 
macro_rules! debug_assert_ptrs_distinct {
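    // Debug-only check that the provided raw pointers are pairwise distinct,
    // i.e. that they never alias one another.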
 
    // Base case
 
    ($ptr1:ident, $ptr2:ident) => {
 
        debug_assert!(!std::ptr::eq($ptr1, $ptr2));
 
    };
 
    // Generic case
 
    ($ptr1:ident, $ptr2:ident, $($tail:ident),+) => {
 
        debug_assert_ptrs_distinct!($ptr1, $ptr2);
 
        debug_assert_ptrs_distinct!($ptr2, $($tail),+);
 
    };
 
}
 

	
 
impl PassTyping {
 
    fn resolve_types(&mut self, ctx: &mut Ctx, queue: &mut ResolveQueue) -> Result<(), ParseError> {
 
        // Keep inferring until we can no longer make any progress
 
        while let Some(next_expr_id) = self.expr_queued.iter().next() {
 
            let next_expr_id = *next_expr_id;
 
            self.expr_queued.remove(&next_expr_id);
 
            self.progress_expr(ctx, next_expr_id)?;
 
        }
 

	
 
        // We check if we have all the types we need. If we're typechecking a 
 
        // polymorphic procedure more than once, then we have already annotated
 
        // the AST and have now performed typechecking for a different 
 
        // monomorph. In that case we just need to perform typechecking, no need
 
        // to annotate the AST again.
 
        // TODO: @Monomorph, this is completely wrong. It seemed okay, but it
 
        //  isn't. Each monomorph might result in completely different internal
 
        //  types.
 
        let definition_id = match &self.definition_type {
 
            DefinitionType::Component(id) => id.upcast(),
 
            DefinitionType::Function(id) => id.upcast(),
 
        };
 

	
 
        let already_checked = ctx.types.get_base_definition(&definition_id).unwrap().has_any_monomorph();
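        // If a monomorph already exists, the AST was annotated with concrete
        // types during an earlier pass over this definition; in that case we
        // only verify (in debug builds) that the newly inferred types match.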
 
        for (expr_id, expr_type) in self.expr_types.iter_mut() {
 
            if !expr_type.is_done {
 
                // Auto-infer a bare integerlike type to a signed 32-bit integer
 
                if expr_type.parts.len() == 1 && expr_type.parts[0] == InferenceTypePart::IntegerLike {
 
                    expr_type.parts[0] = InferenceTypePart::SInt32;
 
                } else {
 
                    let expr = &ctx.heap[*expr_id];
 
                    return Err(ParseError::new_error_at_span(
 
                        &ctx.module.source, expr.span(), format!(
 
                            "could not fully infer the type of this expression (got '{}')",
 
                            expr_type.display_name(&ctx.heap)
 
                        )
 
                    ));
 
                }
 
            }
 

	
 
            if !already_checked {
 
                let concrete_type = ctx.heap[*expr_id].get_type_mut();
 
                expr_type.write_concrete_type(concrete_type, true);
 
            } else {
 
                if cfg!(debug_assertions) {
 
                    let mut concrete_type = ConcreteType::default();
 
                    expr_type.write_concrete_type(&mut concrete_type, true);
 
                    debug_assert_eq!(*ctx.heap[*expr_id].get_type(), concrete_type);
 
                }
 
            }
 
        }
 

	
 
        // All types are fine
 
        ctx.types.add_monomorph(&definition_id, self.poly_vars.clone());
 

	
 
        // Check all things we need to monomorphize
 
        // TODO: Struct/enum/union monomorphization
 
        for (expr_id, extra_data) in self.extra_data.iter() {
 
            if extra_data.poly_vars.is_empty() { continue; }
 

	
 
            // Retrieve polymorph variable specification. Those of struct 
 
            // literals and those of procedure calls need to be fully inferred.
 
            // The remaining ones (e.g. select expressions) allow partial 
 
            // inference of types, as long as the accessed field's type is
 
            // fully inferred.
 
            let needs_full_inference = match &ctx.heap[*expr_id] {
 
                Expression::Call(_) => true,
 
                Expression::Literal(_) => true,
 
                _ => false
 
            };
 

	
 
            if needs_full_inference {
 
                let mut monomorph_types = Vec::with_capacity(extra_data.poly_vars.len());
 
                for (poly_idx, poly_type) in extra_data.poly_vars.iter().enumerate() {
 
                    if !poly_type.is_done {
 
                        // TODO: Single clean function for function signatures and polyvars.
 
                        // TODO: Better error message
 
                        let expr = &ctx.heap[*expr_id];
 
                        return Err(ParseError::new_error_at_span(
 
                            &ctx.module.source, expr.span(), format!(
 
                                "could not fully infer the type of polymorphic variable {} of this expression (got '{}')",
 
                                poly_idx, poly_type.display_name(&ctx.heap)
 
                            )
 
                        ))
 
                    }
 

	
 
                    let mut concrete_type = ConcreteType::default();
 
                    poly_type.write_concrete_type(&mut concrete_type, false);
 
                    monomorph_types.push(concrete_type);
 
                }
 

	
 
                // Resolve to the appropriate expression and instantiate 
 
                // monomorphs.
 
                match &ctx.heap[*expr_id] {
 
                    Expression::Call(call_expr) => {
 
                        // Add to type table if not yet typechecked
 
                        if call_expr.method == Method::UserFunction {
 
                            let definition_id = call_expr.definition;
 
                            if !ctx.types.has_monomorph(&definition_id, &monomorph_types) {
 
                                let root_id = ctx.types
 
                                    .get_base_definition(&definition_id)
 
                                    .unwrap()
 
                                    .ast_root;
 

	
 
                                // Pre-emptively add the monomorph to the type table, but
 
                                // we still need to perform typechecking on it
 
                                // TODO: Unsure about this, performance wise
 
                                let queue_element = ResolveQueueElement{
 
                                    root_id,
 
                                    definition_id,
 
                                    monomorph_types,
 
                                };
 
                                if !queue.contains(&queue_element) {
 
                                    queue.push(queue_element);
 
                                }
 
                            }
 
                        }
 
                    },
 
                    Expression::Literal(lit_expr) => {
 
                        let definition_id = match &lit_expr.value {
 
                            Literal::Struct(literal) => &literal.definition,
 
                            Literal::Enum(literal) => &literal.definition,
 
                            Literal::Union(literal) => &literal.definition,
 
                            _ => unreachable!("post-inference monomorph for non-struct, non-enum literal")
 
                            _ => unreachable!("post-inference monomorph for non-struct, non-enum, non-union literal")
 
                        };
 
                        if !ctx.types.has_monomorph(definition_id, &monomorph_types) {
 
                            ctx.types.add_monomorph(definition_id, monomorph_types);
 
                        }
 
                    },
 
                    _ => unreachable!("needs fully inference, but not a struct literal or call expression")
 
                }
 
            } // else: was just a helper structure...
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_expr(&mut self, ctx: &mut Ctx, id: ExpressionId) -> Result<(), ParseError> {
 
        match &ctx.heap[id] {
 
            Expression::Assignment(expr) => {
 
                let id = expr.this;
 
                self.progress_assignment_expr(ctx, id)
 
            },
 
            Expression::Binding(_expr) => {
 
                unimplemented!("progress binding expression");
 
            },
 
            Expression::Conditional(expr) => {
 
                let id = expr.this;
 
                self.progress_conditional_expr(ctx, id)
 
            },
 
            Expression::Binary(expr) => {
 
                let id = expr.this;
 
                self.progress_binary_expr(ctx, id)
 
            },
 
            Expression::Unary(expr) => {
 
                let id = expr.this;
 
                self.progress_unary_expr(ctx, id)
 
            },
 
            Expression::Indexing(expr) => {
 
                let id = expr.this;
 
                self.progress_indexing_expr(ctx, id)
 
            },
 
            Expression::Slicing(expr) => {
 
                let id = expr.this;
 
                self.progress_slicing_expr(ctx, id)
 
            },
 
            Expression::Select(expr) => {
 
                let id = expr.this;
 
                self.progress_select_expr(ctx, id)
 
            },
 
            Expression::Literal(expr) => {
 
                let id = expr.this;
 
                self.progress_literal_expr(ctx, id)
 
            },
 
            Expression::Call(expr) => {
 
                let id = expr.this;
 
                self.progress_call_expr(ctx, id)
 
            },
 
            Expression::Variable(expr) => {
 
                let id = expr.this;
 
                self.progress_variable_expr(ctx, id)
 
            }
 
        }
 
    }
 

	
 
    fn progress_assignment_expr(&mut self, ctx: &mut Ctx, id: AssignmentExpressionId) -> Result<(), ParseError> {
 
        use AssignmentOperator as AO;
 

	
 
        let upcast_id = id.upcast();
 

	
 
        // Assignment does not return anything (it operates like a statement)
 
        let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &VOID_TEMPLATE)?;
 

	
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.left;
 
        let arg2_expr_id = expr.right;
 

	
 
        debug_log!("Assignment expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Apply forced constraint to LHS value
 
        let progress_forced = match expr.operation {
 
            AO::Set =>
 
                false,
 
            AO::Multiplied | AO::Divided | AO::Added | AO::Subtracted =>
 
                self.apply_forced_constraint(ctx, arg1_expr_id, &NUMBERLIKE_TEMPLATE)?,
 
            AO::Remained | AO::ShiftedLeft | AO::ShiftedRight |
 
            AO::BitwiseAnded | AO::BitwiseXored | AO::BitwiseOred =>
 
                self.apply_forced_constraint(ctx, arg1_expr_id, &INTEGERLIKE_TEMPLATE)?,
 
        };
 

	
 
        let (progress_arg1, progress_arg2) = self.apply_equal2_constraint(
 
            ctx, upcast_id, arg1_expr_id, 0, arg2_expr_id, 0
 
        )?;
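        // If the forced constraint made the LHS progress, then the equal2
        // constraint above must have propagated that progress to the RHS.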
 
        debug_assert!(if progress_forced { progress_arg2 } else { true });
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_forced || progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_forced || progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_conditional_expr(&mut self, ctx: &mut Ctx, id: ConditionalExpressionId) -> Result<(), ParseError> {
 
        // Note: test expression type is already enforced
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_expr_id = expr.true_expression;
 
        let arg2_expr_id = expr.false_expression;
 

	
 
        debug_log!("Conditional expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = self.apply_equal3_constraint(
 
            ctx, upcast_id, arg1_expr_id, arg2_expr_id, 0
 
        )?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_expr_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_expr_id); }
 
        if progress_arg2 { self.queue_expr(arg2_expr_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_binary_expr(&mut self, ctx: &mut Ctx, id: BinaryExpressionId) -> Result<(), ParseError> {
 
        // Note: our expression type might be fixed by our parent, but we still
 
        // need to make sure it matches the type associated with our operation.
 
        use BinaryOperator as BO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg1_id = expr.left;
 
        let arg2_id = expr.right;
 

	
 
        debug_log!("Binary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg1 type: {}", self.expr_types.get(&arg1_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type: {}", self.expr_types.get(&arg2_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg1, progress_arg2) = match expr.operation {
 
            BO::Concatenate => {
 
                // Arguments may be arrays/slices, output is always an array
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &ARRAY_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &ARRAYLIKE_TEMPLATE)?;
 

	
 
                // If they're all arraylike, then we want the subtype to match
 
                let (subtype_expr, subtype_arg1, subtype_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 1)?;
 

	
 
                (progress_expr || subtype_expr, progress_arg1 || subtype_arg1, progress_arg2 || subtype_arg2)
 
            },
 
            BO::LogicalOr | BO::LogicalAnd => {
 
                // Forced boolean on all
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg1 = self.apply_forced_constraint(ctx, arg1_id, &BOOL_TEMPLATE)?;
 
                let progress_arg2 = self.apply_forced_constraint(ctx, arg2_id, &BOOL_TEMPLATE)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::BitwiseOr | BO::BitwiseXor | BO::BitwiseAnd | BO::Remainder | BO::ShiftLeft | BO::ShiftRight => {
 
                // All equal of integer type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
            BO::Equality | BO::Inequality => {
 
                // Equal2 on args, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg1, progress_arg2)
 
            },
 
            BO::LessThan | BO::GreaterThan | BO::LessThanEqual | BO::GreaterThanEqual => {
 
                // Equal2 on args with numberlike type, forced boolean output
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg_base = self.apply_forced_constraint(ctx, arg1_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_arg1, progress_arg2) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, arg1_id, 0, arg2_id, 0)?;
 

	
 
                (progress_expr, progress_arg_base || progress_arg1, progress_arg_base || progress_arg2)
 
            },
 
            BO::Add | BO::Subtract | BO::Multiply | BO::Divide => {
 
                // All equal of number type
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg1, progress_arg2) =
 
                    self.apply_equal3_constraint(ctx, upcast_id, arg1_id, arg2_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg1, progress_base || progress_arg2)
 
            },
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg1 type [{}]: {}", progress_arg1, self.expr_types.get(&arg1_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Arg2 type [{}]: {}", progress_arg2, self.expr_types.get(&arg2_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg1 { self.queue_expr(arg1_id); }
 
        if progress_arg2 { self.queue_expr(arg2_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_unary_expr(&mut self, ctx: &mut Ctx, id: UnaryExpressionId) -> Result<(), ParseError> {
 
        use UnaryOperator as UO;
 

	
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let arg_id = expr.expression;
 

	
 
        debug_log!("Unary expr '{:?}': {}", expr.operation, upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Arg  type: {}", self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let (progress_expr, progress_arg) = match expr.operation {
 
            UO::Positive | UO::Negative => {
 
                // Equal types of numeric class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &NUMBERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::BitwiseNot | UO::PreIncrement | UO::PreDecrement | UO::PostIncrement | UO::PostDecrement => {
 
                // Equal types of integer class
 
                let progress_base = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 
                let (progress_expr, progress_arg) =
 
                    self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, arg_id, 0)?;
 

	
 
                (progress_base || progress_expr, progress_base || progress_arg)
 
            },
 
            UO::LogicalNot => {
 
                // Both booleans
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &BOOL_TEMPLATE)?;
 
                let progress_arg = self.apply_forced_constraint(ctx, arg_id, &BOOL_TEMPLATE)?;
 
                (progress_expr, progress_arg)
 
            }
 
        };
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Arg  type [{}]: {}", progress_arg, self.expr_types.get(&arg_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_arg { self.queue_expr(arg_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_indexing_expr(&mut self, ctx: &mut Ctx, id: IndexingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let index_id = expr.index;
 

	
 
        debug_log!("Indexing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type: {}", self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure subject is arraylike and index is integerlike
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_index = self.apply_forced_constraint(ctx, index_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
        // Make sure if output is of T then subject is Array<T>
 
        let (progress_expr, progress_subject) =
 
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 1)?;
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Index   type [{}]: {}", progress_index, self.expr_types.get(&index_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_index { self.queue_expr(index_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_slicing_expr(&mut self, ctx: &mut Ctx, id: SlicingExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 
        let subject_id = expr.subject;
 
        let from_id = expr.from_index;
 
        let to_id = expr.to_index;
 

	
 
        debug_log!("Slicing expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type: {}", self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type: {}", self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        // Make sure subject is arraylike and indices are of equal integerlike
 
        let progress_subject_base = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
        let progress_idx_base = self.apply_forced_constraint(ctx, from_id, &INTEGERLIKE_TEMPLATE)?;
 
        let (progress_from, progress_to) = self.apply_equal2_constraint(ctx, upcast_id, from_id, 0, to_id, 0)?;
 

	
 
        // Make sure the output type matches the subject's array-like type
        let (progress_expr, progress_subject) =
            self.apply_equal2_constraint(ctx, upcast_id, upcast_id, 0, subject_id, 0)?;
 

	
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject_base || progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - FromIdx type [{}]: {}", progress_idx_base || progress_from, self.expr_types.get(&from_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - ToIdx   type [{}]: {}", progress_idx_base || progress_to, self.expr_types.get(&to_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 
        if progress_subject_base || progress_subject { self.queue_expr(subject_id); }
 
        if progress_idx_base || progress_from { self.queue_expr(from_id); }
 
        if progress_idx_base || progress_to { self.queue_expr(to_id); }
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_select_expr(&mut self, ctx: &mut Ctx, id: SelectExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        
 
        debug_log!("Select expr: {}", upcast_id.index);
 
        debug_log!(" * Before:");
 
        debug_log!("   - Subject type: {}", self.expr_types.get(&ctx.heap[id].subject).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type: {}", self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        let expr = &mut ctx.heap[id];
 
        let subject_id = expr.subject;
 

	
 
        fn determine_inference_type_instance<'a>(types: &'a TypeTable, infer_type: &InferenceType) -> Result<Option<&'a DefinedType>, ()> {
 
            for part in &infer_type.parts {
 
                if part.is_marker() || !part.is_concrete() {
 
                    continue;
 
                }
 

	
 
                // Part is concrete, check if it is an instance of something
 
                if let InferenceTypePart::Instance(definition_id, _num_sub) = part {
 
                    // Lookup type definition and ensure the specified field 
 
                    // name exists on the struct
 
                    let definition = types.get_base_definition(definition_id);
 
                    debug_assert!(definition.is_some());
 
                    let definition = definition.unwrap();
 

	
 
                    return Ok(Some(definition))
 
                } else {
 
                    // Expected an instance of something
 
                    return Err(())
 
                }
 
            }
 

	
 
            // Nothing is concrete yet
 
            Ok(None)
 
        }
 

	
 
        let (progress_subject, progress_expr) = match &mut expr.field {
 
            Field::Length => {
 
                let progress_subject = self.apply_forced_constraint(ctx, subject_id, &ARRAYLIKE_TEMPLATE)?;
 
                let progress_expr = self.apply_forced_constraint(ctx, upcast_id, &INTEGERLIKE_TEMPLATE)?;
 

	
 
                (progress_subject, progress_expr)
 
            },
 
            Field::Symbolic(field) => {
 
                // Retrieve the struct definition id and field index if possible 
 
                // and not previously determined
 
                if field.definition.is_none() {
 
                    // Not yet known, check if we can determine it
 
                    let subject_type = self.expr_types.get(&subject_id).unwrap();
 
                    let type_def = determine_inference_type_instance(&ctx.types, subject_type);
 

	
 
                    match type_def {
 
                        Ok(Some(type_def)) => {
 
                            // Subject type is known, check if it is a 
 
                            // struct and the field exists on the struct
 
                            let struct_def = if let DefinedTypeVariant::Struct(struct_def) = &type_def.definition {
 
                                struct_def
 
                            } else {
 
                                return Err(ParseError::new_error_at_span(
 
                                    &ctx.module.source, field.identifier.span, format!(
 
                                        "Can only apply field access to structs, got a subject of type '{}'",
 
                                        subject_type.display_name(&ctx.heap)
 
                                    )
 
                                ));
 
                            };
 

	
 
                            for (field_def_idx, field_def) in struct_def.fields.iter().enumerate() {
 
                                if field_def.identifier == field.identifier {
 
                                    // Set field definition and index
 
                                    field.definition = Some(type_def.ast_definition);
 
                                    field.field_idx = field_def_idx;
 
                                    break;
 
                                }
 
                            }
 

	
 
                            if field.definition.is_none() {
 
                                let field_span = field.identifier.span;
 
                                let ast_struct_def = ctx.heap[type_def.ast_definition].as_struct();
 
                                return Err(ParseError::new_error_at_span(
 
                                    &ctx.module.source, field_span, format!(
 
                                        "this field does not exist on the struct '{}'",
 
                                        ast_struct_def.identifier.value.as_str()
 
                                    )
 
                                ))
 
                            }
 

	
 
                            // Encountered definition and field index for the
 
                            // first time
 
                            self.insert_initial_select_polymorph_data(ctx, id);
 
                        },
 
                        Ok(None) => {
 
                            // Type of subject is not yet known, so we 
 
                            // cannot make any progress yet
 
                            return Ok(())
 
                        },
 
                        Err(()) => {
 
                            return Err(ParseError::new_error_at_span(
 
                                &ctx.module.source, field.identifier.span, format!(
 
                                    "Can only apply field access to structs, got a subject of type '{}'",
 
                                    subject_type.display_name(&ctx.heap)
 
                                )
 
                            ));
 
                        }
 
                    }
 
                }
 

	
 
                // If here then field definition and index are known, and the
 
                // initial type (based on the struct's definition) has been
 
                // applied.
 
                // Check to see if we can infer anything about the subject's and
 
                // the field's polymorphic variables
 
                let poly_data = self.extra_data.get_mut(&upcast_id).unwrap();
 
                let mut poly_progress = HashSet::new();
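                // Raw pointers are used below so that entries in the separate
                // `extra_data` and `expr_types` maps can be mutated through
                // the same helper call; the entries involved are distinct, so
                // the pointers never alias.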
 
                
 
                // Apply to struct's type
 
                let signature_type: *mut _ = &mut poly_data.embedded[0];
 
                let subject_type: *mut _ = self.expr_types.get_mut(&subject_id).unwrap();
 

	
 
                let (_, progress_subject) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, Some(subject_id), poly_data, &mut poly_progress,
 
                    signature_type, 0, subject_type, 0
 
                )?;
 

	
 
                if progress_subject {
 
                    self.expr_queued.insert(subject_id);
 
                }
 
                
 
                // Apply to field's type
 
                let signature_type: *mut _ = &mut poly_data.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
                let (_, progress_expr) = Self::apply_equal2_signature_constraint(
 
                    ctx, upcast_id, None, poly_data, &mut poly_progress, 
 
                    signature_type, 0, expr_type, 0
 
                )?;
 

	
 
                if progress_expr {
 
                    if let Some(parent_id) = ctx.heap[upcast_id].parent_expr_id() {
 
                        self.expr_queued.insert(parent_id);
 
                    }
 
                }
 

	
 
                // Reapply progress in polymorphic variables to struct's type
 
                let signature_type: *mut _ = &mut poly_data.embedded[0];
 
                let subject_type: *mut _ = self.expr_types.get_mut(&subject_id).unwrap();
 
                
 
                let progress_subject = Self::apply_equal2_polyvar_constraint(
 
                    poly_data, &poly_progress, signature_type, subject_type
 
                );
 

	
 
                let signature_type: *mut _ = &mut poly_data.returned;
 
                let expr_type: *mut _ = self.expr_types.get_mut(&upcast_id).unwrap();
 

	
 
                let progress_expr = Self::apply_equal2_polyvar_constraint(
 
                    poly_data, &poly_progress, signature_type, expr_type
 
                );
 

	
 
                (progress_subject, progress_expr)
 
            }
 
        };
 

	
 
        if progress_subject { self.queue_expr(subject_id); }
 
        if progress_expr { self.queue_expr_parent(ctx, upcast_id); }
 

	
 
        debug_log!(" * After:");
 
        debug_log!("   - Subject type [{}]: {}", progress_subject, self.expr_types.get(&subject_id).unwrap().display_name(&ctx.heap));
 
        debug_log!("   - Expr    type [{}]: {}", progress_expr, self.expr_types.get(&upcast_id).unwrap().display_name(&ctx.heap));
 

	
 
        Ok(())
 
    }
 

	
 
    fn progress_literal_expr(&mut self, ctx: &mut Ctx, id: LiteralExpressionId) -> Result<(), ParseError> {
 
        let upcast_id = id.upcast();
 
        let expr = &ctx.heap[id];
 

	
 
        debug_log!("Literal expr: {}", upcast_id.index);
 
@@ -2627,386 +2647,384 @@ impl PassTyping {
 

	
 
        // Iterate through markers in signature type to try and make progress
 
        // on the polymorphic variable        
 
        let mut seek_idx = 0;
 
        let mut modified_sig = false;
 
        
 
        while let Some((poly_idx, start_idx)) = signature_type.find_body_marker(seek_idx) {
 
            let end_idx = InferenceType::find_subtree_end_idx(&signature_type.parts, start_idx);
 
            // if polymorph_progress.contains(&poly_idx) {
 
                // Need to match subtrees
 
                let polymorph_type = &polymorph_data.poly_vars[poly_idx];
 
                let modified_at_marker = Self::apply_forced_constraint_types(
 
                    signature_type, start_idx, 
 
                    &polymorph_type.parts, 0
 
                ).expect("no failure when applying polyvar constraints");
 

	
 
                modified_sig = modified_sig || modified_at_marker;
 
            // }
 

	
 
            seek_idx = end_idx;
 
        }
 

	
 
        // If we made any progress on the signature's type, then we also need to
 
        // apply it to the expression that is supposed to match the signature.
 
        if modified_sig {
 
            match InferenceType::infer_subtree_for_single_type(
 
                expr_type, 0, &signature_type.parts, 0
 
            ) {
 
                SingleInferenceResult::Modified => true,
 
                SingleInferenceResult::Unmodified => false,
 
                SingleInferenceResult::Incompatible =>
 
                    unreachable!("encountered failure while reapplying modified signature to expression after polyvar inference")
 
            }
 
        } else {
 
            false
 
        }
 
    }
 

	
 
    /// Applies a type constraint that expects all three provided types to be
 
    /// equal. In case we can make progress in inferring the types then we
 
    /// attempt to do so. If the call is successful then the composition of all
 
    /// types is made equal.
 
    fn apply_equal3_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId,
 
        arg1_id: ExpressionId, arg2_id: ExpressionId,
 
        start_idx: usize
 
    ) -> Result<(bool, bool, bool), ParseError> {
 
        // Safety: all expression IDs are always distinct, and we do not modify
 
        //  the container
 
        debug_assert_expr_ids_unique_and_known!(self, expr_id, arg1_id, arg2_id);
 
        let expr_type: *mut _ = self.expr_types.get_mut(&expr_id).unwrap();
 
        let arg1_type: *mut _ = self.expr_types.get_mut(&arg1_id).unwrap();
 
        let arg2_type: *mut _ = self.expr_types.get_mut(&arg2_id).unwrap();
 

	
 
        let expr_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(expr_type, start_idx, arg1_type, start_idx)
 
        };
 
        if expr_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_expr_type_error(ctx, expr_id, arg1_id));
 
        }
 

	
 
        let args_res = unsafe{
 
            InferenceType::infer_subtrees_for_both_types(arg1_type, start_idx, arg2_type, start_idx) };
 
        if args_res == DualInferenceResult::Incompatible {
 
            return Err(self.construct_arg_type_error(ctx, expr_id, arg1_id, arg2_id));
 
        }
 

	
 
        // If all types are compatible, but the second call caused the arg1_type
 
        // to be expanded, then we must also assign this to expr_type.
 
        let mut progress_expr = expr_res.modified_lhs();
 
        let mut progress_arg1 = expr_res.modified_rhs();
 
        let progress_arg2 = args_res.modified_rhs();
 

	
 
        if args_res.modified_lhs() { 
 
            unsafe {
 
                let end_idx = InferenceType::find_subtree_end_idx(&(*arg2_type).parts, start_idx);
 
                let subtree = &((*arg2_type).parts[start_idx..end_idx]);
 
                (*expr_type).replace_subtree(start_idx, subtree);
 
            }
 
            progress_expr = true;
 
            progress_arg1 = true;
 
        }
 

	
 
        Ok((progress_expr, progress_arg1, progress_arg2))
 
    }
 

	
 
    // TODO: @optimize Since we only deal with a single type this might be done
 
    //  a lot more efficiently, methinks (disregarding the allocations here)
 
    fn apply_equal_n_constraint(
 
        &mut self, ctx: &Ctx, expr_id: ExpressionId, args: &[ExpressionId],
 
    ) -> Result<Vec<bool>, ParseError> {
 
        // Early exit
 
        match args.len() {
 
            0 => return Ok(vec!()),         // nothing to progress
 
            1 => return Ok(vec![false]),    // only one type, so nothing to infer
 
            _ => {}
 
        }
 

	
 
        let mut progress = Vec::new();
 
        progress.resize(args.len(), false);
 

	
 
        // Do pairwise inference, keep track of the last entry we made progress
 
        // on. Once done we need to update everything to the most-inferred type.
 
        let mut arg_iter = args.iter();
 
        let mut last_arg_id = *arg_iter.next().unwrap();
 
        let mut last_lhs_progressed = 0;
 
        let mut lhs_arg_idx = 0;
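        // `last_lhs_progressed` records the right-most argument whose type was
        // modified by a later pairwise unification; every argument before it
        // is re-synchronized with the final, most-inferred type afterwards.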
 

	
 
        while let Some(next_arg_id) = arg_iter.next() {
 
            let arg1_type: *mut _ = self.expr_types.get_mut(&last_arg_id).unwrap();
 
            let arg2_type: *mut _ = self.expr_types.get_mut(next_arg_id).unwrap();
 

	
 
            let res = unsafe {
 
                InferenceType::infer_subtrees_for_both_types(arg1_type, 0, arg2_type, 0)
 
            };
 

	
 
            if res == DualInferenceResult::Incompatible {
 
                return Err(self.construct_arg_type_error(ctx, expr_id, last_arg_id, *next_arg_id));
 
            }
 

	
 
            if res.modified_lhs() {
 
                // We re-inferred something on the left hand side, so everything
 
                // up until now should be re-inferred.
 
                progress[lhs_arg_idx] = true;
 
                last_lhs_progressed = lhs_arg_idx;
 
            }
 
            progress[lhs_arg_idx + 1] = res.modified_rhs();
 

	
 
            last_arg_id = *next_arg_id;
 
            lhs_arg_idx += 1;
 
        }
 

	
 
        // Re-infer everything. Note that we do not need to re-infer the type
 
        // exactly at `last_lhs_progressed`, but only everything up to it.
 
        let last_type: *mut _ = self.expr_types.get_mut(args.last().unwrap()).unwrap();
 
        for arg_idx in 0..last_lhs_progressed {
 
            let arg_type: *mut _ = self.expr_types.get_mut(&args[arg_idx]).unwrap();
 
            unsafe{
 
                (*arg_type).replace_subtree(0, &(*last_type).parts);
 
            }
 
            progress[arg_idx] = true;
 
        }
 

	
 
        Ok(progress)
 
    }
 

	
 
    /// Determines the `InferenceType` for the expression based on the
 
    /// expression parent. Note that if the parent is another expression, we do
 
    /// not take special action; instead we let parent expressions fix the type
 
    /// of subexpressions before they have a chance to call this function.
 
    fn insert_initial_expr_inference_type(
 
        &mut self, ctx: &mut Ctx, expr_id: ExpressionId
 
    ) -> Result<(), ParseError> {
 
        use ExpressionParent as EP;
 
        use InferenceTypePart as ITP;
 

	
 
        let expr = &ctx.heap[expr_id];
 
        let inference_type = match expr.parent() {
 
            EP::None =>
 
                // Should have been set by linker
 
                unreachable!(),
 
            EP::ExpressionStmt(_) | EP::Expression(_, _) =>
 
                // Determined during type inference
 
                InferenceType::new(false, false, vec![ITP::Unknown]),
 
            EP::If(_) | EP::While(_) =>
 
                // Must be a boolean
 
                InferenceType::new(false, true, vec![ITP::Bool]),
 
            EP::Return(_) =>
 
                // Must match the return type of the function
 
                if let DefinitionType::Function(func_id) = self.definition_type {
 
                    debug_assert_eq!(ctx.heap[func_id].return_types.len(), 1);
 
                    let returned = &ctx.heap[func_id].return_types[0];
 
                    self.determine_inference_type_from_parser_type_elements(&returned.elements, true)
 
                } else {
 
                    // Cannot happen: definition always set upon body traversal
 
                    // and "return" calls in components are illegal.
 
                    unreachable!();
 
                },
 
            EP::New(_) =>
 
                // Must be a component call, which we assign a "Void" return
 
                // type
 
                InferenceType::new(false, true, vec![ITP::Void]),
 
        };
 

	
 
        match self.expr_types.entry(expr_id) {
 
            Entry::Vacant(vacant) => {
 
                vacant.insert(inference_type);
 
            },
 
            Entry::Occupied(mut preexisting) => {
 
                // We already have an entry, this happens if our parent fixed
 
                // our type (e.g. we're used in a conditional expression's test)
 
                // but we have a different type.
 
                // TODO: Is this ever called? Seems like it can't
 
                debug_assert!(false, "I am actually called, my ID is {}", expr_id.index);
 
                let old_type = preexisting.get_mut();
 
                if let SingleInferenceResult::Incompatible = InferenceType::infer_subtree_for_single_type(
 
                    old_type, 0, &inference_type.parts, 0
 
                ) {
 
                    return Err(self.construct_expr_type_error(ctx, expr_id, expr_id))
 
                }
 
            }
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    fn insert_initial_call_polymorph_data(
 
        &mut self, ctx: &mut Ctx, call_id: CallExpressionId
 
    ) {
 
        // Note: the polymorph variables may be partially specified and may
 
        // contain references to the wrapping definition's (i.e. the proctype
 
        // we are currently visiting) polymorphic arguments.
 
        //
 
        // The arguments of the call may refer to polymorphic variables in the
 
        // definition of the function we're calling, not of the wrapping
 
        // definition. We insert markers in these inferred types to be able to
 
        // map them back and forth to the polymorphic arguments of the function
 
        // we are calling.
 
        let call = &ctx.heap[call_id];
 

	
 
        // Handle the polymorphic arguments (if there are any)
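        // The first element of the parser type is the root of the call's type;
        // its embedded elements are the explicitly written polymorphic arguments.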
 
        let num_poly_args = call.parser_type.elements[0].variant.num_embedded();
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 
        for embedded_elements in call.parser_type.iter_embedded(0) {
 
            poly_args.push(self.determine_inference_type_from_parser_type_elements(embedded_elements, true));
 
        }
 

	
 
        // Handle the arguments and return types
 
        let definition = &ctx.heap[call.definition];
 
        let (parameters, returned) = match definition {
 
            Definition::Component(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, None)
 
            },
 
            Definition::Function(definition) => {
 
                debug_assert_eq!(poly_args.len(), definition.poly_vars.len());
 
                (&definition.parameters, Some(&definition.return_types))
 
            },
 
            Definition::Struct(_) | Definition::Enum(_) | Definition::Union(_) => {
 
                unreachable!("insert_initial_call_polymorph_data for non-procedure type");
 
            },
 
        };
 

	
 
        let mut parameter_types = Vec::with_capacity(parameters.len());
 
        for parameter_id in parameters.clone().into_iter() { // TODO: @Performance
 
            let param = &ctx.heap[parameter_id];
 
            parameter_types.push(self.determine_inference_type_from_parser_type_elements(&param.parser_type.elements, false));
 
        }
 

	
 
        let return_type = match returned {
 
            None => {
 
                // Component, so returns a "Void"
 
                InferenceType::new(false, true, vec![InferenceTypePart::Void])
 
            },
 
            Some(returned) => {
 
                debug_assert_eq!(returned.len(), 1);
 
                let returned = &returned[0];
 
                self.determine_inference_type_from_parser_type_elements(&returned.elements, false)
 
            }
 
        };
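        // Store under the call's expression ID: the polymorphic arguments, the
        // parameter types ("embedded") and the return type ("returned").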
 

	
 
        self.extra_data.insert(call_id.upcast(), ExtraData {
 
            poly_vars: poly_args,
 
            embedded: parameter_types,
 
            returned: return_type
 
        });
 
    }
 

	
 
    fn insert_initial_struct_polymorph_data(
 
        &mut self, ctx: &mut Ctx, lit_id: LiteralExpressionId,
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_struct();
 

	
 
        // Handle polymorphic arguments
 
        let num_embedded = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_embedded);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle parser types on struct definition
 
        let defined_type = ctx.types.get_base_definition(&literal.definition).unwrap();
 
        let struct_type = defined_type.definition.as_struct();
 
        debug_assert_eq!(poly_args.len(), defined_type.poly_vars.len());
 

	
 
        // Note: the programmer may specify the fields of a struct literal in a
 
        // different order than in the definition. We take the literal-specified
 
        // order as leading.
 
        let mut embedded_types = Vec::with_capacity(struct_type.fields.len());
 
        for lit_field in literal.fields.iter() {
 
            let def_field = &struct_type.fields[lit_field.field_idx];
 
            let inference_type = self.determine_inference_type_from_parser_type_elements(&def_field.parser_type.elements, false);
 
            embedded_types.push(inference_type);
 
        }
 

	
 
        // Return type is the struct type itself, with the appropriate 
 
        // polymorphic variables. So:
 
        // - 1 part for definition
 
        // - one marker part per polymorphic argument (N_poly_arg in total)
 
        // - all the parts for the currently known polymorphic arguments 
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len()));
 
        let mut return_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { return_type_done = false; }
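            // The marker lets inference on the return type be mapped back to the
            // corresponding entry in `poly_vars`.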
 

	
 
            parts.push(ITP::MarkerBody(poly_var_idx));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let return_type = InferenceType::new(!poly_args.is_empty(), return_type_done, parts);
 

	
 
        self.extra_data.insert(lit_id.upcast(), ExtraData{
 
            poly_vars: poly_args,
 
            embedded: embedded_types,
 
            returned: return_type,
 
        });
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for enum expressions. These
 
    /// can never be determined from the enum itself, but may be inferred from
 
    /// the use of the enum.
 
    fn insert_initial_enum_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_enum();
 

	
 
        // Handle polymorphic arguments to the enum
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
 

	
 
        // Handle enum type itself
 
        let parts_reserved = 1 + poly_args.len() + total_num_poly_parts;
 
        let mut parts = Vec::with_capacity(parts_reserved);
 
        parts.push(ITP::Instance(literal.definition, poly_args.len()));
 
        let mut enum_type_done = true;
 
        for (poly_var_idx, poly_var) in poly_args.iter().enumerate() {
 
            if !poly_var.is_done { enum_type_done = false; }
 

	
 
            parts.push(ITP::MarkerBody(poly_var_idx));
 
            parts.extend(poly_var.parts.iter().cloned());
 
        }
 

	
 
        debug_assert_eq!(parts.len(), parts_reserved);
 
        let enum_type = InferenceType::new(!poly_args.is_empty(), enum_type_done, parts);
 

	
 
        self.extra_data.insert(lit_id.upcast(), ExtraData{
 
            poly_vars: poly_args,
 
            embedded: Vec::new(),
 
            returned: enum_type,
 
        });
 
    }
 

	
 
    /// Inserts the extra polymorphic data struct for unions. The polymorphic
 
    /// arguments may be partially determined from embedded values in the union.
 
    fn insert_initial_union_polymorph_data(
 
        &mut self, ctx: &Ctx, lit_id: LiteralExpressionId
 
    ) {
 
        use InferenceTypePart as ITP;
 
        let literal = ctx.heap[lit_id].value.as_union();
 

	
 
        // Construct the polymorphic variables
 
        let num_poly_args = literal.parser_type.elements[0].variant.num_embedded();
 
        let mut total_num_poly_parts = 0;
 
        let mut poly_args = Vec::with_capacity(num_poly_args);
 

	
 
        for embedded_elements in literal.parser_type.iter_embedded(0) {
 
            let poly_type = self.determine_inference_type_from_parser_type_elements(embedded_elements, true);
 
            total_num_poly_parts += poly_type.parts.len();
 
            poly_args.push(poly_type);
 
        }
src/protocol/parser/type_table.rs
Show inline comments
 
@@ -138,385 +138,386 @@ pub struct EnumVariant {
 

	
 
/// `UnionType` is the algebraic datatype (also known as a sum type or tagged
 
/// union). A value is one of the union's variants, identified by its tag, and
 
/// may carry zero or more embedded values.
 
pub struct UnionType {
 
    pub(crate) variants: Vec<UnionVariant>,
 
    pub(crate) tag_representation: PrimitiveType
 
}
 

	
 
pub struct UnionVariant {
 
    pub(crate) identifier: Identifier,
 
    pub(crate) embedded: Vec<ParserType>, // empty if the variant has no embedded values
 
    pub(crate) tag_value: i64,
 
}
 

	
 
pub struct StructType {
 
    pub(crate) fields: Vec<StructField>,
 
}
 

	
 
pub struct StructField {
 
    pub(crate) identifier: Identifier,
 
    pub(crate) parser_type: ParserType,
 
}
 

	
 
pub struct FunctionType {
 
    pub return_types: Vec<ParserType>,
 
    pub arguments: Vec<FunctionArgument>
 
}
 

	
 
pub struct ComponentType {
 
    pub variant: ComponentVariant,
 
    pub arguments: Vec<FunctionArgument>
 
}
 

	
 
pub struct FunctionArgument {
 
    identifier: Identifier,
 
    parser_type: ParserType,
 
}
 

	
 
//------------------------------------------------------------------------------
 
// Type table
 
//------------------------------------------------------------------------------
 

	
 
// TODO: @cleanup Do I really need this, doesn't make the code that much cleaner
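// Stack of (RootId, DefinitionId) breadcrumbs: a type's dependencies are pushed
// on top and resolved before the type itself is popped.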
 
struct TypeIterator {
 
    breadcrumbs: Vec<(RootId, DefinitionId)>
 
}
 

	
 
impl TypeIterator {
 
    fn new() -> Self {
 
        Self{ breadcrumbs: Vec::with_capacity(32) }
 
    }
 

	
 
    fn reset(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.clear();
 
        self.breadcrumbs.push((root_id, definition_id))
 
    }
 

	
 
    fn push(&mut self, root_id: RootId, definition_id: DefinitionId) {
 
        self.breadcrumbs.push((root_id, definition_id));
 
    }
 

	
 
    fn contains(&self, root_id: RootId, definition_id: DefinitionId) -> bool {
 
        for (stored_root_id, stored_definition_id) in self.breadcrumbs.iter() {
 
            if *stored_root_id == root_id && *stored_definition_id == definition_id { return true; }
 
        }
 

	
 
        return false
 
    }
 

	
 
    fn top(&self) -> Option<(RootId, DefinitionId)> {
 
        self.breadcrumbs.last().map(|(r, d)| (*r, *d))
 
    }
 

	
 
    fn pop(&mut self) {
 
        debug_assert!(!self.breadcrumbs.is_empty());
 
        self.breadcrumbs.pop();
 
    }
 
}
 

	
 
/// Result from attempting to resolve a `ParserType` using the symbol table and
 
/// the type table.
 
enum ResolveResult {
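    /// ParserType points to a builtin type.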
 
    Builtin,
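    /// ParserType points to one of the enclosing definition's polymorphic arguments.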
 
    PolymorphicArgument,
 
    /// ParserType points to a user-defined type that is already resolved in the
 
    /// type table.
 
    Resolved(RootId, DefinitionId),
 
    /// ParserType points to a user-defined type that is not yet resolved into
 
    /// the type table.
 
    Unresolved(RootId, DefinitionId)
 
}
 

	
 
pub(crate) struct TypeTable {
 
    /// Lookup from AST DefinitionId to a defined type. Considering possible
 
    /// polymorphs is done inside the `DefinedType` struct.
 
    lookup: HashMap<DefinitionId, DefinedType>,
 
    /// Iterator over `(module, definition)` tuples, used as a workspace to make
 
    /// sure that the base definitions of all of a type's subtypes are resolved.
 
    iter: TypeIterator,
 
    /// Queue of parser types that still have to be resolved into a
 
    /// `(module, definition)` tuple.
 
    parser_type_iter: VecDeque<ParserTypeId>,
 
}
 

	
 
impl TypeTable {
 
    /// Construct a new type table without any resolved types.
 
    pub(crate) fn new() -> Self {
 
        Self{ 
 
            lookup: HashMap::new(), 
 
            iter: TypeIterator::new(), 
 
            parser_type_iter: VecDeque::with_capacity(64), 
 
        }
 
    }
 

	
 
    pub(crate) fn build_base_types(&mut self, modules: &mut [Module], ctx: &mut PassCtx) -> Result<(), ParseError> {
 
        // Make sure we're allowed to cast root_id to index into ctx.modules
 
        debug_assert!(modules.iter().all(|m| m.phase >= ModuleCompilationPhase::DefinitionsParsed));
 
        debug_assert!(self.lookup.is_empty());
 
        debug_assert!(self.iter.top().is_none());
 
        debug_assert!(self.parser_type_iter.is_empty());
 

	
 
        if cfg!(debug_assertions) {
 
            for (index, module) in modules.iter().enumerate() {
 
                debug_assert_eq!(index, module.root_id.index as usize);
 
            }
 
        }
 

	
 
        // Use context to guess hashmap size
 
        let reserve_size = ctx.heap.definitions.len();
 
        self.lookup.reserve(reserve_size);
 

	
 
        for root_idx in 0..modules.len() {
 
            let last_definition_idx = ctx.heap[modules[root_idx].root_id].definitions.len();
 
            for definition_idx in 0..last_definition_idx {
 
                let definition_id = ctx.heap[modules[root_idx].root_id].definitions[definition_idx];
 
                self.resolve_base_definition(modules, ctx, definition_id)?;
 
            }
 
        }
 

	
 
        debug_assert_eq!(self.lookup.len() + 6, reserve_size, "mismatch in reserved size of type table"); // NOTE: Temp fix for builtin functions
 
        for module in modules {
 
            module.phase = ModuleCompilationPhase::TypesAddedToTable;
 
        }
 

	
 
        Ok(())
 
    }
 

	
 
    /// Retrieves the base definition from the type table. We must be able to
 
    /// retrieve it, as we resolve all base types upon type table construction
 
    /// (for now). However, in the future we might do on-demand type resolving,
 
    /// so we return an `Option` anyway.
 
    pub(crate) fn get_base_definition(&self, definition_id: &DefinitionId) -> Option<&DefinedType> {
 
        self.lookup.get(&definition_id)
 
    }
 

	
 
    /// Instantiates a monomorph for a given base definition.
 
    pub(crate) fn add_monomorph(&mut self, definition_id: &DefinitionId, types: Vec<ConcreteType>) {
 
        debug_assert!(
 
            self.lookup.contains_key(definition_id),
 
            "attempting to instantiate monomorph of definition unknown to type table"
 
        );
 

	
 
        let definition = self.lookup.get_mut(definition_id).unwrap();
 
        definition.add_monomorph(types);
 
    }
 

	
 
    /// Checks if a given definition already has a specific monomorph
 
    pub(crate) fn has_monomorph(&mut self, definition_id: &DefinitionId, types: &Vec<ConcreteType>) -> bool {
 
        debug_assert!(
 
            self.lookup.contains_key(definition_id),
 
            "attempting to check monomorph existence of definition unknown to type table"
 
        );
 

	
 
        let definition = self.lookup.get(definition_id).unwrap();
 
        definition.has_monomorph(types)
 
    }
 

	
 
    /// This function will resolve just the basic definition of the type; it
 
    /// will not handle any of the monomorphized instances of the type.
 
    fn resolve_base_definition<'a>(&'a mut self, modules: &[Module], ctx: &mut PassCtx, definition_id: DefinitionId) -> Result<(), ParseError> {
 
        // Check if we have already resolved the base definition
 
        if self.lookup.contains_key(&definition_id) { return Ok(()); }
 

	
 
        let root_id = ctx.heap[definition_id].defined_in();
 
        self.iter.reset(root_id, definition_id);
 

	
 
        while let Some((root_id, definition_id)) = self.iter.top() {
 
            // We have a type to resolve
 
            let definition = &ctx.heap[definition_id];
 

	
 
            let can_pop_breadcrumb = match definition {
 
                // TODO: @cleanup Borrow rules hax
 
                // Bit ugly, since we already have the definition, but we need
 
                // to work around rust borrowing rules...
 
                Definition::Enum(_) => self.resolve_base_enum_definition(modules, ctx, root_id, definition_id),
 
                Definition::Union(_) => self.resolve_base_union_definition(modules, ctx, root_id, definition_id),
 
                Definition::Struct(_) => self.resolve_base_struct_definition(modules, ctx, root_id, definition_id),
 
                Definition::Component(_) => self.resolve_base_component_definition(modules, ctx, root_id, definition_id),
 
                Definition::Function(_) => self.resolve_base_function_definition(modules, ctx, root_id, definition_id),
 
            }?;
 

	
 
            // If it cannot be popped then `ingest_resolve_result` has pushed a
 
            // new breadcrumb that we must follow before resolving the current type.
 
            if can_pop_breadcrumb {
 
                self.iter.pop();
 
            }
 
        }
 

	
 
        // We must have resolved the type
 
        debug_assert!(self.lookup.contains_key(&definition_id), "base type not resolved");
 
        Ok(())
 
    }
 

	
 
    /// Resolve the basic enum definition to an entry in the type table. It will
 
    /// not instantiate any monomorphized instances of polymorphic enum
 
    /// definitions. If a subtype has to be resolved first then this function
 
    /// will return `false` after calling `ingest_resolve_result`.
 
    fn resolve_base_enum_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, root_id: RootId, definition_id: DefinitionId) -> Result<bool, ParseError> {
 
        debug_assert!(ctx.heap[definition_id].is_enum());
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base enum already resolved");
 
        
 
        let definition = ctx.heap[definition_id].as_enum();
 

	
 
        let mut enum_value = -1;
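        // Starts at -1 so implicit variants count up from 0; an explicit value
        // resets the counter for subsequent variants.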
 
        let mut min_enum_value = 0;
 
        let mut max_enum_value = 0;
 
        let mut variants = Vec::with_capacity(definition.variants.len());
 
        for variant in &definition.variants {
 
            enum_value += 1;
 
            match &variant.value {
 
                EnumVariantValue::None => {
 
                    variants.push(EnumVariant{
 
                        identifier: variant.identifier.clone(),
 
                        value: enum_value,
 
                    });
 
                },
 
                EnumVariantValue::Integer(override_value) => {
 
                    enum_value = *override_value;
 
                    variants.push(EnumVariant{
 
                        identifier: variant.identifier.clone(),
 
                        value: enum_value,
 
                    });
 
                }
 
            }
 
            if enum_value < min_enum_value { min_enum_value = enum_value; }
 
            else if enum_value > max_enum_value { max_enum_value = enum_value; }
 
        }
 

	
 
        // Ensure enum variant names and polymorphic args do not conflict
 
        self.check_identifier_collision(
 
            modules, root_id, &variants, |variant| &variant.identifier, "enum variant"
 
        )?;
 

	
 
        // Because we're parsing an enum, the programmer cannot put the
 
        // polymorphic variables inside the variants. But the polymorphic
 
        // variables might still be present as "marker types"
 
        self.check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 
        let poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 

	
 
        self.lookup.insert(definition_id, DefinedType {
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Enum(EnumType{
 
                variants,
 
                representation: Self::enum_tag_type(min_enum_value, max_enum_value)
 
            }),
 
            poly_vars,
 
            is_polymorph: false,
 
            monomorphs: Vec::new()
 
        });
 

	
 
        Ok(true)
 
    }
 

	
 
    /// Resolves the basic union definition to an entry in the type table. It
 
    /// will not instantiate any monomorphized instances of polymorphic union
 
    /// definitions. If a subtype has to be resolved first then this function
 
    /// will return `false` after calling `ingest_resolve_result`.
 
    fn resolve_base_union_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, root_id: RootId, definition_id: DefinitionId) -> Result<bool, ParseError> {
 
        debug_assert!(ctx.heap[definition_id].is_union());
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base union already resolved");
 

	
 
        let definition = ctx.heap[definition_id].as_union();
 

	
 
        // Make sure all embedded types are resolved
 
        for variant in &definition.variants {
 
            match &variant.value {
 
                UnionVariantValue::None => {},
 
                UnionVariantValue::Embedded(embedded) => {
 
                    for parser_type in embedded {
 
                        let resolve_result = self.resolve_base_parser_type(modules, ctx, root_id, parser_type)?;
 
                        if !self.ingest_resolve_result(modules, ctx, resolve_result)? {
 
                            return Ok(false)
 
                        }
 
                    }
 
                }
 
            }
 
        }
 

	
 
        // If here then all embedded types are resolved
 

	
 
        // Determine the union variants
 
        let mut tag_value = -1;
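        // Pre-incremented below, so tags are assigned 0, 1, 2, ... in declaration order.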
 
        let mut variants = Vec::with_capacity(definition.variants.len());
 
        for variant in &definition.variants {
 
            tag_value += 1;
 
            let embedded = match &variant.value {
 
                UnionVariantValue::None => { Vec::new() },
 
                UnionVariantValue::Embedded(embedded) => {
 
                    // Type should be resolvable, we checked this above
 
                    embedded.clone()
 
                },
 
            };
 

	
 
            variants.push(UnionVariant{
 
                identifier: variant.identifier.clone(),
 
                embedded,
 
                tag_value,
 
            })
 
        }
 

	
 
        // Ensure union variant names and polymorphic args do not conflict
 
        self.check_identifier_collision(
 
            modules, root_id, &variants, |variant| &variant.identifier, "union variant"
 
        )?;
 
        self.check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct polymorphic variables and mark the ones that are in use
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
 
        for variant in &variants {
 
            for parser_type in &variant.embedded {
 
                Self::mark_used_polymorphic_variables(&mut poly_vars, parser_type);
 
            }
 
        }
 
        let is_polymorph = poly_vars.iter().any(|arg| arg.is_in_use);
 

	
 
        // Insert base definition in type table
 
        self.lookup.insert(definition_id, DefinedType {
 
            ast_root: root_id,
 
            ast_definition: definition_id,
 
            definition: DefinedTypeVariant::Union(UnionType{
 
                variants,
 
                tag_representation: Self::enum_tag_type(-1, tag_value),
 
            }),
 
            poly_vars,
 
            is_polymorph,
 
            monomorphs: Vec::new()
 
        });
 

	
 
        Ok(true)
 
    }
 

	
 
    /// Resolves the basic struct definition to an entry in the type table. It
 
    /// will not instantiate any monomorphized instances of polymorphic struct
 
    /// definitions.
 
    fn resolve_base_struct_definition(&mut self, modules: &[Module], ctx: &mut PassCtx, root_id: RootId, definition_id: DefinitionId) -> Result<bool, ParseError> {
 
        debug_assert!(ctx.heap[definition_id].is_struct());
 
        debug_assert!(!self.lookup.contains_key(&definition_id), "base struct already resolved");
 

	
 
        let definition = ctx.heap[definition_id].as_struct();
 

	
 
        // Make sure all fields point to resolvable types
 
        for field_definition in &definition.fields {
 
            let resolve_result = self.resolve_base_parser_type(modules, ctx, root_id, &field_definition.parser_type)?;
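            // A `false` result means a dependency was pushed onto the breadcrumb
            // stack and must be resolved before we revisit this struct.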
 
            if !self.ingest_resolve_result(modules, ctx, resolve_result)? {
 
                return Ok(false)
 
            }
 
        }
 

	
 
        // All fields types are resolved, construct base type
 
        let mut fields = Vec::with_capacity(definition.fields.len());
 
        for field_definition in &definition.fields {
 
            fields.push(StructField{
 
                identifier: field_definition.field.clone(),
 
                parser_type: field_definition.parser_type.clone(),
 
            })
 
        }
 

	
 
        // And make sure no conflicts exist in field names and/or polymorphic args
 
        self.check_identifier_collision(
 
            modules, root_id, &fields, |field| &field.identifier, "struct field"
 
        )?;
 
        self.check_poly_args_collision(modules, ctx, root_id, &definition.poly_vars)?;
 

	
 
        // Construct representation of polymorphic arguments
 
        let mut poly_vars = Self::create_polymorphic_variables(&definition.poly_vars);
src/protocol/tests/eval_calls.rs
Show inline comments
 
use super::*;
 

	
 
#[test]
 
fn test_function_call() {
 
    Tester::new_single_source_expect_ok("with literal arg", "
 
    func add_two(u32 value) -> u32 {
 
        return value + 2;
 
    }
 
    func foo() -> u32 {
 
        return add_two(5);
 
    }
 
    ").for_function("foo", |f| {
 
        f.call(Some(Value::UInt32(7)));
 
        f.call_ok(Some(Value::UInt32(7)));
 
    });
 

	
 
    println!("\n\n\n\n\n\n\n");
 

	
 
    Tester::new_single_source_expect_ok("with variable arg", "
 
    func add_two(u32 value) -> u32 {
 
        value += 1;
 
        return value + 1;
 
    }
 
    func foo() -> bool {
 
        auto initial = 5;
 
        auto result = add_two(initial);
 
        return initial == 5 && result == 7;
 
    }").for_function("foo", |f| {
 
        f.call(Some(Value::Bool(true)));
 
        f.call_ok(Some(Value::Bool(true)));
 
    });
 
}
 
\ No newline at end of file
src/protocol/tests/eval_operators.rs
Show inline comments
 
use super::*;
 

	
 
#[test]
 
fn test_assignment_operators() {
 
    fn construct_source(value_type: &str, value_initial: &str, value_op: &str) -> String {
 
        return format!(
 
            "func foo() -> {} {{
 
                {} value = {};
 
                value {};
 
                return value;
 
            }}",
 
            value_type, value_type, value_initial, value_op
 
        );
 
    }
 

	
 
    fn perform_test(name: &str, source: String, expected_value: Value) {
 
        Tester::new_single_source_expect_ok(name, source)
 
            .for_function("foo", move |f| {
 
                f.call(Some(expected_value));
 
                f.call_ok(Some(expected_value));
 
            });
 
    }
 

	
 
    perform_test(
 
        "set",
 
        construct_source("u32", "1", "= 5"),
 
        Value::UInt32(5)
 
    );
 

	
 
    perform_test(
 
        "multiplied",
 
        construct_source("u32", "2", "*= 4"),
 
        Value::UInt32(8)
 
    );
 

	
 
    perform_test(
 
        "divided",
 
        construct_source("u32", "8", "/= 4"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "remained",
 
        construct_source("u32", "8", "%= 3"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "added",
 
        construct_source("u32", "2", "+= 4"),
 
        Value::UInt32(6)
 
    );
 

	
 
    perform_test(
 
        "subtracted",
 
        construct_source("u32", "6", "-= 4"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "shifted left",
 
        construct_source("u32", "2", "<<= 2"),
 
        Value::UInt32(8)
 
    );
 

	
 
    perform_test(
 
        "shifted right",
 
        construct_source("u32", "8", ">>= 2"),
 
        Value::UInt32(2)
 
    );
 

	
 
    perform_test(
 
        "bitwise and",
 
        construct_source("u32", "15", "&= 35"),
 
        Value::UInt32(3)
 
    );
 

	
 
    perform_test(
 
        "bitwise xor",
 
        construct_source("u32", "3", "^= 7"),
 
        Value::UInt32(4)
 
    );
 

	
 
    perform_test(
 
        "bitwise or",
 
        construct_source("u32", "12", "|= 3"),
 
        Value::UInt32(15)
 
    );
 
}
 

	
 
#[test]
 
fn test_binary_integer_operators() {
 
    fn construct_source(value_type: &str, code: &str) -> String {
 
        format!("
 
        func foo() -> {} {{
 
            {}
 
        }}
 
        ", value_type, code)
 
    }
 

	
 
    fn perform_test(test_name: &str, value_type: &str, code: &str, expected_value: Value) {
 
        Tester::new_single_source_expect_ok(test_name, construct_source(value_type, code))
 
            .for_function("foo", move |f| {
 
                f.call(Some(expected_value));
 
                f.call_ok(Some(expected_value));
 
            });
 
    }
 

	
 
    perform_test(
 
        "bitwise_or", "u16",
 
        "auto a = 3; return a | 4;", Value::UInt16(7)
 
    );
 
    perform_test(
 
        "bitwise_xor", "u16",
 
        "auto a = 3; return a ^ 7;", Value::UInt16(4)
 
    );
 
    perform_test(
 
        "bitwise and", "u16",
 
        "auto a = 0b110011; return a & 0b011110;", Value::UInt16(0b010010)
 
    );
 
    perform_test(
 
        "shift left", "u16",
 
        "auto a = 0x0F; return a << 4;", Value::UInt16(0xF0)
 
    );
 
    perform_test(
 
        "shift right", "u64",
 
        "auto a = 0xF0; return a >> 4;", Value::UInt64(0x0F)
 
    );
 
    perform_test(
 
        "add", "u32",
 
        "auto a = 5; return a + 5;", Value::UInt32(10)
 
    );
 
    perform_test(
 
        "subtract", "u32",
 
        "auto a = 3; return a - 3;", Value::UInt32(0)
 
    );
 
    perform_test(
 
        "multiply", "u8",
 
        "auto a = 2 * 2; return a * 2 * 2;", Value::UInt8(16)
 
    );
 
    perform_test(
 
        "divide", "u8",
 
        "auto a = 32 / 2; return a / 2 / 2;", Value::UInt8(4)
 
    );
 
    perform_test(
 
        "remainder", "u16",
 
        "auto a = 29; return a % 3;", Value::UInt16(2)
 
    );
 
}
 
\ No newline at end of file
