CSY/reowolf/src/collections/mpmc_queue.rs @ 1aef293674a6
experimenting with multithreaded scheduler sync primitives

use std::sync::Mutex;
use std::collections::VecDeque;
/// Generic multiple-producer, multiple-consumer queue. The current
/// implementation provides the required functionality but none of the
/// optimizations: it simply protects a `VecDeque` with a `Mutex`.
/// TODO: @Optimize
pub struct MpmcQueue<T> {
    queue: Mutex<VecDeque<T>>,
}

impl<T> MpmcQueue<T> {
    /// Creates an empty queue.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }

    /// Creates an empty queue whose backing storage can hold at least
    /// `capacity` items before reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            queue: Mutex::new(VecDeque::with_capacity(capacity)),
        }
    }

    /// Appends an item to the back of the queue. Blocks while another thread
    /// holds the internal lock and panics if that lock has been poisoned.
    pub fn push_back(&self, item: T) {
        let mut queue = self.queue.lock().unwrap();
        queue.push_back(item);
    }

    /// Removes and returns the item at the front of the queue, or `None` if
    /// the queue is empty. Blocks while another thread holds the internal
    /// lock and panics if that lock has been poisoned.
    pub fn pop_front(&self) -> Option<T> {
        let mut queue = self.queue.lock().unwrap();
        queue.pop_front()
    }
}
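
// Below is a minimal usage sketch, not part of the original file: a
// hypothetical test module that shares the queue between several producer and
// consumer threads via `Arc` and checks that every pushed item is popped
// exactly once. The module, test name, and constants are illustrative only.
#[cfg(test)]
mod tests {
    use super::MpmcQueue;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn concurrent_push_then_pop() {
        const PRODUCERS: usize = 4;
        const CONSUMERS: usize = 4;
        const ITEMS_PER_PRODUCER: usize = 1_000;

        let queue = Arc::new(MpmcQueue::with_capacity(PRODUCERS * ITEMS_PER_PRODUCER));

        // Each producer pushes a disjoint range of integers.
        let producers: Vec<_> = (0..PRODUCERS)
            .map(|p| {
                let queue = Arc::clone(&queue);
                thread::spawn(move || {
                    for i in 0..ITEMS_PER_PRODUCER {
                        queue.push_back(p * ITEMS_PER_PRODUCER + i);
                    }
                })
            })
            .collect();
        for handle in producers {
            handle.join().unwrap();
        }

        // Consumers drain the queue concurrently and report how many items
        // they each removed; since the producers are done, `None` means empty.
        let consumers: Vec<_> = (0..CONSUMERS)
            .map(|_| {
                let queue = Arc::clone(&queue);
                thread::spawn(move || {
                    let mut popped = 0;
                    while queue.pop_front().is_some() {
                        popped += 1;
                    }
                    popped
                })
            })
            .collect();
        let total: usize = consumers.into_iter().map(|h| h.join().unwrap()).sum();

        assert_eq!(total, PRODUCERS * ITEMS_PER_PRODUCER);
    }
}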