// Location: CSY/reowolf/src/runtime2/global_store.rs
// Revision: b4ac681e0e7f — WIP on message-based sync impl
use std::ptr;
use std::sync::{Arc, Barrier, RwLock, RwLockReadGuard};
use std::sync::atomic::{AtomicBool, AtomicU32};
use crate::collections::{MpmcQueue, RawVec};
use super::connector::{ConnectorPDL, ConnectorPublic};
use super::port::{PortIdLocal, Port, PortKind, PortOwnership, Channel};
use super::inbox::PublicInbox;
use super::scheduler::Router;
use crate::ProtocolDescription;
use crate::runtime2::connector::{ConnectorScheduling, RunDeltaState};
use crate::runtime2::inbox::{DataMessage, MessageContents, SyncMessage};
use crate::runtime2::native::Connector;
use crate::runtime2::scheduler::ConnectorCtx;
/// A kind of token that, once obtained, allows mutable access to a connector.
/// We're trying to use move semantics as much as possible: the owner of this
/// key is the only one that may execute the connector's code.
///
/// Deliberately neither `Clone` nor `Copy`: a key is a unique capability, in
/// contrast to the freely copyable `ConnectorId`.
pub(crate) struct ConnectorKey {
    pub index: u32, // index of the connector in the `ConnectorStore`
}
impl ConnectorKey {
    /// Downcasts the `ConnectorKey`, which grants mutable access, to a
    /// `ConnectorId`, the freely copyable token for immutable access.
    #[inline]
    pub fn downcast(&self) -> ConnectorId {
        ConnectorId(self.index)
    }

    /// Reconstructs a `ConnectorKey` from a `ConnectorId`.
    ///
    /// # Safety
    ///
    /// Bypasses the type-enforced `ConnectorKey`/`ConnectorId` system: the
    /// caller must guarantee it is the sole holder of mutable access to the
    /// referenced connector.
    #[inline]
    pub unsafe fn from_id(id: ConnectorId) -> ConnectorKey {
        ConnectorKey{ index: id.0 }
    }
}
/// A kind of token that allows shared access to a connector. Multiple threads
/// may hold this
///
/// The value `u32::MAX` is reserved as the "invalid" sentinel, see
/// `new_invalid`/`is_valid`.
#[derive(Copy, Clone)]
pub(crate) struct ConnectorId(pub u32);
impl ConnectorId {
    // TODO: Like the other `new_invalid`, maybe remove
    /// Produces the sentinel ID (`u32::MAX`) that refers to no connector.
    #[inline]
    pub fn new_invalid() -> ConnectorId {
        ConnectorId(u32::MAX)
    }

    /// True when this ID is not the `new_invalid` sentinel.
    #[inline]
    pub(crate) fn is_valid(&self) -> bool {
        self.0 != u32::MAX
    }
}
// TODO: Change this, I hate this. But I also don't want to put `public` and
// `router` of `ScheduledConnector` back into `Connector`. The reason I don't
// want `Box<dyn Connector>` everywhere is because of the v-table overhead. But
// to truly design this properly I need some benchmarks.
/// Either a user-defined (PDL) connector, stored inline, or a native one
/// behind a `Box<dyn Connector>`; dispatch happens in the `Connector` impl
/// below.
pub enum ConnectorVariant {
    UserDefined(ConnectorPDL),
    Native(Box<dyn Connector>),
}
impl Connector for ConnectorVariant {
    /// Forwards an incoming message to the wrapped connector implementation.
    fn handle_message(&mut self, message: MessageContents, ctx: &ConnectorCtx, delta_state: &mut RunDeltaState) {
        match self {
            Self::UserDefined(inner) => inner.handle_message(message, ctx, delta_state),
            Self::Native(inner) => inner.handle_message(message, ctx, delta_state),
        }
    }

    /// Runs the wrapped connector, returning its scheduling request.
    fn run(&mut self, protocol_description: &ProtocolDescription, ctx: &ConnectorCtx, delta_state: &mut RunDeltaState) -> ConnectorScheduling {
        match self {
            Self::UserDefined(inner) => inner.run(protocol_description, ctx, delta_state),
            Self::Native(inner) => inner.run(protocol_description, ctx, delta_state),
        }
    }
}
/// A connector as it lives in the `ConnectorStore`, bundled with the state
/// the scheduler needs to run it.
pub struct ScheduledConnector {
    pub connector: ConnectorVariant, // access by connector
    pub context: ConnectorCtx, // mutable access by scheduler, immutable by connector
    pub public: ConnectorPublic, // accessible by all schedulers and connectors
    pub router: Router,
}
/// The registry containing all connectors. The idea here is that when someone
/// owns a `ConnectorKey`, then one has unique access to that connector.
/// Otherwise one has shared access.
///
/// This datastructure is built to be wrapped in a RwLock.
pub(crate) struct ConnectorStore {
    // Shared counter used to hand out fresh port IDs (cloned into each
    // connector's `ConnectorCtx`).
    pub(crate) port_counter: Arc<AtomicU32>,
    // Lock guards the slot bookkeeping only; the entries themselves are
    // heap-allocated and handed out as long-lived references.
    inner: RwLock<ConnectorStoreInner>,
}
/// Lock-protected bookkeeping of `ConnectorStore`: raw pointers to the
/// heap-allocated connectors, plus a free-list of reusable slot indices.
struct ConnectorStoreInner {
    connectors: RawVec<*mut ScheduledConnector>,
    free: Vec<usize>, // indices into `connectors` whose contents were dropped
}
impl ConnectorStore {
    /// Creates a store with preallocated space for `capacity` connectors.
    fn with_capacity(capacity: usize) -> Self {
        return Self{
            port_counter: Arc::new(AtomicU32::new(0)),
            inner: RwLock::new(ConnectorStoreInner {
                connectors: RawVec::with_capacity(capacity),
                free: Vec::with_capacity(capacity),
            }),
        };
    }

    /// Retrieves the shared members of the connector.
    ///
    /// NOTE(review): the returned `&'static` outlives the read lock; this is
    /// only sound because entries are heap-allocated and never moved or
    /// deallocated until the store itself is dropped — confirm that invariant
    /// holds for all callers.
    pub(crate) fn get_shared(&self, connector_id: ConnectorId) -> &'static ConnectorPublic {
        let lock = self.inner.read().unwrap();
        unsafe {
            let connector = lock.connectors.get(connector_id.0 as usize);
            debug_assert!(!connector.is_null());
            // Fixed: field access requires an explicit deref of the raw
            // pointer; `connector.public` does not compile on a raw pointer.
            return &(**connector).public;
        }
    }

    /// Retrieves a particular connector. Only the thread that pulled the
    /// associated key out of the execution queue should (be able to) call this.
    pub(crate) fn get_mut(&self, key: &ConnectorKey) -> &'static mut ScheduledConnector {
        let lock = self.inner.read().unwrap();
        unsafe {
            let connector = lock.connectors.get_mut(key.index as usize);
            debug_assert!(!connector.is_null());
            // Fixed: `*connector as &mut _` is not a valid cast; reborrow
            // through the raw pointer instead.
            return &mut **connector;
        }
    }

    /// Create a new connector, returning the key that can be used to retrieve
    /// and/or queue it.
    ///
    /// Initial ports owned by the new (user-defined) connector are transferred
    /// away from `created_by`; a missing port panics, exposing a programmer's
    /// mistake in port management.
    pub(crate) fn create(&self, created_by: &mut ScheduledConnector, connector: ConnectorVariant) -> ConnectorKey {
        let connector = ScheduledConnector {
            connector,
            context: ConnectorCtx::new(self.port_counter.clone()),
            public: ConnectorPublic::new(),
            router: Router::new(),
        };

        // Creation of the connector in the global store requires the write
        // lock. Scoped so the lock is released before `get_mut` below takes
        // the read lock (std's RwLock is not reentrant). Fixed: `index` was
        // previously declared inside this scope but used after it (did not
        // compile), and the guard lacked `mut` while being mutated through.
        let index = {
            let mut lock = self.inner.write().unwrap();
            if let Some(index) = lock.free.pop() {
                // Reuse a slot vacated by `destroy`; its old contents were
                // already dropped in place, so a plain `ptr::write` suffices.
                unsafe {
                    let target = lock.connectors.get_mut(index);
                    debug_assert!(!target.is_null());
                    ptr::write(*target, connector);
                }
                index
            } else {
                // Cheating a bit here. Anyway, move to heap, store in list
                let connector = Box::into_raw(Box::new(connector));
                unsafe {
                    let index = lock.connectors.len();
                    lock.connectors.push(connector);
                    index
                }
            }
        };

        // Setting of new connector's ID
        let key = ConnectorKey{ index: index as u32 };
        let new_connector = self.get_mut(&key);
        new_connector.context.id = key.downcast();

        // Transferring ownership of ports (and crashing if there is a
        // programmer's mistake in port management)
        match &new_connector.connector {
            ConnectorVariant::UserDefined(connector) => {
                for port_id in &connector.ports.owned_ports {
                    let mut port = created_by.context.remove_port(*port_id);
                    port.owning_connector = new_connector.context.id;
                    new_connector.context.add_port(port);
                }
            },
            ConnectorVariant::Native(_) => {}, // no initial ports (yet!)
        }

        return key;
    }

    /// Drops the connector's contents in place and marks its slot for reuse.
    /// The heap allocation itself is kept alive until the store is dropped.
    pub(crate) fn destroy(&self, key: ConnectorKey) {
        // Fixed: guard must be `mut` to push onto the free-list through it.
        let mut lock = self.inner.write().unwrap();
        unsafe {
            let connector = lock.connectors.get_mut(key.index as usize);
            debug_assert!(!connector.is_null());
            ptr::drop_in_place(*connector);
            // Note: but not deallocating!
        }
        lock.free.push(key.index as usize);
    }
}
impl Drop for ConnectorStore {
fn drop(&mut self) {
let lock = self.inner.write().unwrap();
for idx in 0..lock.connectors.len() {
unsafe {
let memory = *lock.connectors.get_mut(idx);
let _ = Box::from_raw(memory); // takes care of deallocation
}
}
}
}
/// Global store of connectors, ports and queues that are used by the sceduler
/// threads. The global store has the appearance of a thread-safe datatype, but
/// one needs to be careful using it.
///
/// TODO: @docs
/// TODO: @Optimize, very lazy implementation of concurrent datastructures.
///     This includes the `should_exit` and `did_exit` pair!
pub struct GlobalStore {
    pub connector_queue: MpmcQueue<ConnectorKey>, // connectors ready to be executed
    pub connectors: ConnectorStore,
    pub should_exit: AtomicBool, // signal threads to exit
}
impl GlobalStore {
pub fn new() -> Self {
Self{
connector_queue: MpmcQueue::with_capacity(256),
connectors: ConnectorStore::with_capacity(256),
should_exit: AtomicBool::new(false),
}
}
}
|