Files
@ 58dfabd1be9f
Branch filter:
Location: CSY/reowolf/src/runtime2/global_store.rs
58dfabd1be9f
9.6 KiB
application/rls-services+xml
moving to laptop
use crate::collections::{MpmcQueue, RawVec};
use super::connector::{ConnectorPDL, ConnectorPublic};
use super::port::{PortIdLocal, Port, PortKind, PortOwnership, Channel};
use super::inbox::PublicInbox;
use super::scheduler::Router;
use std::ptr;
use std::sync::{Barrier, RwLock, RwLockReadGuard};
use std::sync::atomic::AtomicBool;
use crate::runtime2::native::Connector;
/// A kind of token that, once obtained, allows mutable access to a connector.
/// We're trying to use move semantics as much as possible: the owner of this
/// key is the only one that may execute the connector's code.
pub(crate) struct ConnectorKey {
    // Slot index of the connector inside the global `ConnectorStore`; also
    // the value wrapped by the `ConnectorId` produced by `downcast`.
    pub index: u32, // of connector
}
impl ConnectorKey {
    /// Downcasts the `ConnectorKey` (unique access) to a `ConnectorId`, which
    /// only grants shared, immutable access.
    #[inline]
    pub fn downcast(&self) -> ConnectorId {
        ConnectorId(self.index)
    }

    /// Reconstructs a `ConnectorKey` from a `ConnectorId`.
    ///
    /// # Safety
    /// Bypasses the type-enforced `ConnectorKey`/`ConnectorId` system: the
    /// caller must ensure no other key for the same connector is in use.
    #[inline]
    pub unsafe fn from_id(id: ConnectorId) -> ConnectorKey {
        ConnectorKey{ index: id.0 }
    }
}
/// A kind of token that allows shared access to a connector. Multiple threads
/// may hold this
#[derive(Copy, Clone)]
pub(crate) struct ConnectorId(u32); // slot index; `u32::MAX` marks "invalid"
impl ConnectorId {
    // TODO: Like the other `new_invalid`, maybe remove
    /// Produces the sentinel id (`u32::MAX`) that refers to no connector.
    #[inline]
    pub fn new_invalid() -> ConnectorId {
        ConnectorId(u32::MAX)
    }
}
// TODO: Change this, I hate this. But I also don't want to put `public` and
// `router` of `ScheduledConnector` back into `Connector`.
pub enum ConnectorVariant {
    /// Connector whose behaviour is driven by user-supplied PDL code.
    UserDefined(ConnectorPDL),
    /// Native (Rust-implemented) connector behind a trait object.
    Native(Box<dyn Connector>),
}
/// A connector as stored in the registry: the connector itself plus the
/// shared (`public`) and routing (`router`) state kept alongside it.
pub struct ScheduledConnector {
    pub connector: ConnectorVariant,
    pub public: ConnectorPublic,
    pub router: Router
}
/// The registry containing all connectors. The idea here is that when someone
/// owns a `ConnectorKey`, then one has unique access to that connector.
/// Otherwise one has shared access.
///
/// This datastructure is built to be wrapped in a RwLock.
// `pub` is required: `GlobalStore` exposes this type in a `pub` field, and a
// private type in a public interface is rejected/linted by the compiler.
// (`PortStore` below is already `pub` for the same reason.)
pub struct ConnectorStore {
    inner: RwLock<ConnectorStoreInner>,
}

struct ConnectorStoreInner {
    // Heap pointers to connectors. A slot's allocation stays alive after
    // `destroy` so previously handed-out references do not dangle.
    connectors: RawVec<*mut ScheduledConnector>,
    // Indices of destroyed slots, reused by `create`.
    free: Vec<usize>,
}
impl ConnectorStore {
    /// Creates a store with room for `capacity` connectors before the backing
    /// storage needs to grow.
    fn with_capacity(capacity: usize) -> Self {
        return Self{
            inner: RwLock::new(ConnectorStoreInner {
                connectors: RawVec::with_capacity(capacity),
                free: Vec::with_capacity(capacity),
            }),
        };
    }

    /// Retrieves the shared members of the connector.
    ///
    /// NOTE(review): the `'static` lifetime is a promotion of an unbounded
    /// raw-pointer deref — the reference is really only valid while the
    /// connector lives in this store.
    pub(crate) fn get_shared(&self, connector_id: ConnectorId) -> &'static ConnectorPublic {
        let lock = self.inner.read().unwrap();
        unsafe {
            // SAFETY: slots are only (re)written under the write lock, and a
            // live `ConnectorId` implies the slot holds a valid pointer.
            let connector = *lock.connectors.get(connector_id.0 as usize);
            debug_assert!(!connector.is_null());
            // Was `&*connector.public`: field access through a raw pointer
            // requires an explicit parenthesized dereference.
            return &(*connector).public;
        }
    }

    /// Retrieves a particular connector. Only the thread that pulled the
    /// associated key out of the execution queue should (be able to) call this.
    pub(crate) fn get_mut(&self, key: &ConnectorKey) -> &'static mut ScheduledConnector {
        let lock = self.inner.read().unwrap();
        unsafe {
            // SAFETY: ownership of the `ConnectorKey` (move semantics) makes
            // the caller the only party with mutable access.
            let connector = *lock.connectors.get_mut(key.index as usize);
            debug_assert!(!connector.is_null());
            // Was `*connector as &mut _`: `as` cannot cast to a reference;
            // an explicit reborrow of the raw pointer is needed.
            return &mut *connector;
        }
    }

    /// Create a new connector, returning the key that can be used to retrieve
    /// and/or queue it.
    pub(crate) fn create(&self, connector: ConnectorVariant) -> ConnectorKey {
        // `mut` is required: `free.pop()` / `connectors.push()` mutate the
        // inner storage through the guard.
        let mut lock = self.inner.write().unwrap();
        let connector = ScheduledConnector{
            connector,
            public: ConnectorPublic::new(),
            router: Router::new(),
        };

        let index;
        if lock.free.is_empty() {
            // No reusable slot: move the connector to the heap, append slot.
            let connector = Box::into_raw(Box::new(connector));
            unsafe {
                index = lock.connectors.len();
                lock.connectors.push(connector);
            }
        } else {
            // Reuse a destroyed slot. Its allocation is still alive (see
            // `destroy`), so write the new value in place.
            index = lock.free.pop().unwrap();
            unsafe {
                let target = lock.connectors.get_mut(index);
                debug_assert!(!target.is_null());
                ptr::write(*target, connector);
            }
        }

        return ConnectorKey{ index: index as u32 };
    }

    /// Destroys a connector. Consuming the key guarantees nobody else can
    /// still access it mutably.
    pub(crate) fn destroy(&self, key: ConnectorKey) {
        // `mut` is required for `free.push` below.
        let mut lock = self.inner.write().unwrap();
        unsafe {
            let connector = lock.connectors.get_mut(key.index as usize);
            // Drop the value in place, but keep the allocation so the slot
            // can be reused by `create`.
            ptr::drop_in_place(*connector);
        }
        lock.free.push(key.index as usize);
    }
}
impl Drop for ConnectorStore {
fn drop(&mut self) {
let lock = self.inner.write().unwrap();
for idx in 0..lock.connectors.len() {
unsafe {
let memory = *lock.connectors.get_mut(idx);
let _ = Box::from_raw(memory); // takes care of deallocation
}
}
}
}
/// The registry of all ports
pub struct PortStore {
    inner: RwLock<PortStoreInner>,
}

struct PortStoreInner {
    // Backing storage for all ports; entries are handed out as raw-pointer
    // derived references, so slots must stay put while referenced.
    ports: RawVec<Port>,
    // Indices of freed ports, available for reuse.
    free: Vec<usize>,
}
impl PortStore {
    /// Creates a store with room for `capacity` ports before the backing
    /// storage needs to grow.
    fn with_capacity(capacity: usize) -> Self {
        Self{
            inner: RwLock::new(PortStoreInner{
                ports: RawVec::with_capacity(capacity),
                free: Vec::with_capacity(capacity),
            }),
        }
    }

    /// Retrieves a mutable reference to a port, keeping the store's read lock
    /// held for as long as the returned `PortRef` lives.
    pub(crate) fn get(&self, key: &ConnectorKey, port_id: PortIdLocal) -> PortRef {
        let lock = self.inner.read().unwrap();
        debug_assert!(port_id.is_valid());
        unsafe {
            let port = lock.ports.get_mut(port_id.index as usize);
            let port = &mut *port;
            // NOTE(review): `reserve_port` below writes the field
            // `owning_connector` — one of the two names is stale; verify
            // against the `Port` definition in `super::port`.
            debug_assert_eq!(port.owning_connector_id, key.index); // race condition (if they are not equal, which should never happen), better than nothing
            return PortRef{ lock, port };
        }
    }

    /// Creates a channel: a putter/getter port pair owned by
    /// `creating_connector`, pointing at one another.
    pub(crate) fn create_channel(&self, creating_connector: ConnectorId) -> Channel {
        let mut lock = self.inner.write().unwrap();

        // Reserves a new port. Doesn't point it to its counterpart
        fn reserve_port(lock: &mut std::sync::RwLockWriteGuard<'_, PortStoreInner>, kind: PortKind, creating_connector: ConnectorId) -> u32 {
            let index;
            if lock.free.is_empty() {
                // No reusable slot: append a fresh port.
                index = lock.ports.len() as u32;
                lock.ports.push(Port{
                    self_id: PortIdLocal::new(index),
                    peer_id: PortIdLocal::new_invalid(),
                    kind,
                    ownership: PortOwnership::Owned,
                    // Was the undefined name `connector_id` (compile error);
                    // the parameter is `creating_connector`.
                    owning_connector: creating_connector,
                    peer_connector: creating_connector
                });
            } else {
                // Reuse a freed slot; `self_id` is already correct for it.
                index = lock.free.pop().unwrap() as u32;
                let port = unsafe{ &mut *lock.ports.get_mut(index as usize) };
                port.peer_id = PortIdLocal::new_invalid();
                port.kind = kind;
                port.ownership = PortOwnership::Owned;
                port.owning_connector = creating_connector;
                port.peer_connector = creating_connector;
            }
            return index;
        }

        // Create the ports
        let putter_id = reserve_port(&mut lock, PortKind::Putter, creating_connector);
        let getter_id = reserve_port(&mut lock, PortKind::Getter, creating_connector);
        debug_assert_ne!(putter_id, getter_id);

        // Point them to one another
        unsafe {
            let putter_port = &mut *lock.ports.get_mut(putter_id as usize);
            let getter_port = &mut *lock.ports.get_mut(getter_id as usize);
            putter_port.peer_id = getter_port.self_id;
            getter_port.peer_id = putter_port.self_id;
        }

        return Channel{
            putter_id: PortIdLocal::new(putter_id),
            getter_id: PortIdLocal::new(getter_id),
        }
    }
}
/// Reference to a single port that holds the registry's read lock for as long
/// as it is alive.
pub struct PortRef<'p> {
    // Held only to keep the port registry locked; never read directly.
    lock: RwLockReadGuard<'p, PortStoreInner>,
    // NOTE(review): `'static` here is a lie derived from a raw-pointer deref;
    // the field is private so the reference cannot outlive the guard.
    port: &'static mut Port,
}
impl<'p> std::ops::Deref for PortRef<'p> {
    type Target = Port;

    /// Borrows the referenced `Port` immutably.
    fn deref(&self) -> &Self::Target {
        self.port
    }
}
impl<'p> std::ops::DerefMut for PortRef<'p> {
    /// Borrows the referenced `Port` mutably.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.port
    }
}
impl Drop for PortStore {
    fn drop(&mut self) {
        let lock = self.inner.write().unwrap();
        // Build the set once so the membership test below is O(1) per port
        // instead of a linear scan of `free` for every port (O(n*m)).
        let free: std::collections::HashSet<usize> = lock.free.iter().copied().collect();
        for idx in 0..lock.ports.len() {
            // Freed slots hold no live value; don't drop them again.
            if free.contains(&idx) {
                continue;
            }
            unsafe {
                let port = lock.ports.get_mut(idx);
                std::ptr::drop_in_place(port);
            }
        }
    }
}
/// Global store of connectors, ports and queues that are used by the sceduler
/// threads. The global store has the appearance of a thread-safe datatype, but
/// one needs to be careful using it.
///
/// TODO: @docs
/// TODO: @Optimize, very lazy implementation of concurrent datastructures.
/// This includes the `should_exit` and `did_exit` pair!
pub struct GlobalStore {
    // Queue of connectors ready to be picked up by a scheduler thread.
    pub connector_queue: MpmcQueue<ConnectorKey>,
    // Registry of all connectors (unique access via `ConnectorKey`, shared
    // access via `ConnectorId`).
    pub connectors: ConnectorStore,
    // Registry of all ports.
    pub ports: PortStore,
    pub should_exit: AtomicBool, // signal threads to exit
}
impl GlobalStore {
    /// Creates a fresh global store with default initial capacities and the
    /// exit flag cleared.
    pub fn new() -> Self {
        const INITIAL_CAPACITY: usize = 256;
        return Self{
            connector_queue: MpmcQueue::with_capacity(INITIAL_CAPACITY),
            connectors: ConnectorStore::with_capacity(INITIAL_CAPACITY),
            ports: PortStore::with_capacity(INITIAL_CAPACITY),
            should_exit: AtomicBool::new(false),
        };
    }
}
|