Diffstat (limited to 'rtic-common/src')
-rw-r--r--  rtic-common/src/dropper.rs              26
-rw-r--r--  rtic-common/src/lib.rs                  13
-rw-r--r--  rtic-common/src/wait_queue.rs          275
-rw-r--r--  rtic-common/src/waker_registration.rs   66
4 files changed, 380 insertions, 0 deletions
diff --git a/rtic-common/src/dropper.rs b/rtic-common/src/dropper.rs
new file mode 100644
index 0000000..a4b4d15
--- /dev/null
+++ b/rtic-common/src/dropper.rs
@@ -0,0 +1,26 @@
+//! A helper for running a closure on drop.
+
+/// Runs a closure on drop.
+pub struct OnDrop<F: FnOnce()> {
+ f: core::mem::MaybeUninit<F>,
+}
+
+impl<F: FnOnce()> OnDrop<F> {
+ /// Create a new dropper from the given closure.
+ pub fn new(f: F) -> Self {
+ Self {
+ f: core::mem::MaybeUninit::new(f),
+ }
+ }
+
+ /// Defuse the dropper so the closure does not run on drop.
+ pub fn defuse(self) {
+ core::mem::forget(self)
+ }
+}
+
+impl<F: FnOnce()> Drop for OnDrop<F> {
+ fn drop(&mut self) {
+ unsafe { self.f.as_ptr().read()() }
+ }
+}
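
A minimal usage sketch of OnDrop (illustrative only, not part of this diff; the function name is made up): the closure runs whenever the guard is dropped, e.g. on early return, panic, or future cancellation, while defuse() skips it on the success path.

    use rtic_common::dropper::OnDrop;

    fn with_cleanup() {
        // Cleanup runs if we leave this scope before calling `defuse`.
        let guard = OnDrop::new(|| {
            // e.g. undo a registration or release a resource
        });

        // ... work that may bail out early ...

        // Happy path reached: skip the cleanup closure.
        guard.defuse();
    }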
diff --git a/rtic-common/src/lib.rs b/rtic-common/src/lib.rs
new file mode 100644
index 0000000..03d0306
--- /dev/null
+++ b/rtic-common/src/lib.rs
@@ -0,0 +1,13 @@
+//! Common utilities shared between the RTIC crates.
+
+#![no_std]
+#![deny(missing_docs)]
+//deny_warnings_placeholder_for_ci
+
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+
+pub mod dropper;
+pub mod wait_queue;
+pub mod waker_registration;
diff --git a/rtic-common/src/wait_queue.rs b/rtic-common/src/wait_queue.rs
new file mode 100644
index 0000000..b1aa775
--- /dev/null
+++ b/rtic-common/src/wait_queue.rs
@@ -0,0 +1,275 @@
+//! An intrusive linked-list based wait queue.
+
+use core::marker::PhantomPinned;
+use core::pin::Pin;
+use core::ptr::null_mut;
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::task::Waker;
+use critical_section as cs;
+
+/// A helper definition of a wait queue.
+pub type WaitQueue = LinkedList<Waker>;
+
+/// A FIFO linked list for a wait queue.
+pub struct LinkedList<T> {
+ head: AtomicPtr<Link<T>>, // Effectively an UnsafeCell<*mut Link<T>>: only accessed inside critical sections
+ tail: AtomicPtr<Link<T>>,
+}
+
+impl<T> LinkedList<T> {
+ /// Create a new linked list.
+ pub const fn new() -> Self {
+ Self {
+ head: AtomicPtr::new(null_mut()),
+ tail: AtomicPtr::new(null_mut()),
+ }
+ }
+}
+
+impl<T: Clone> LinkedList<T> {
+ const R: Ordering = Ordering::Relaxed;
+
+ /// Pop the first element in the queue.
+ pub fn pop(&self) -> Option<T> {
+ cs::with(|_| {
+ // Make sure all previous writes are visible
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ let head = self.head.load(Self::R);
+
+ // SAFETY: `as_ref` is safe as `push` requires a valid reference to a link
+ if let Some(head_ref) = unsafe { head.as_ref() } {
+ // Move head to the next element
+ self.head.store(head_ref.next.load(Self::R), Self::R);
+
+ // We read the value at head
+ let head_val = head_ref.val.clone();
+
+ let tail = self.tail.load(Self::R);
+ if head == tail {
+ // The queue is empty
+ self.tail.store(null_mut(), Self::R);
+ }
+
+ if let Some(next_ref) = unsafe { head_ref.next.load(Self::R).as_ref() } {
+ next_ref.prev.store(null_mut(), Self::R);
+ }
+
+ // Clear the pointers in the node.
+ head_ref.next.store(null_mut(), Self::R);
+ head_ref.prev.store(null_mut(), Self::R);
+ head_ref.is_popped.store(true, Self::R);
+
+ return Some(head_val);
+ }
+
+ None
+ })
+ }
+
+ /// Put an element at the back of the queue.
+ ///
+ /// # Safety
+ ///
+ /// The link must live until it is removed from the queue.
+ pub unsafe fn push(&self, link: Pin<&Link<T>>) {
+ cs::with(|_| {
+ // Make sure all previous writes are visible
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ let tail = self.tail.load(Self::R);
+
+ // SAFETY: This data structure does not move the underlying value.
+ let link = link.get_ref();
+
+ if let Some(tail_ref) = unsafe { tail.as_ref() } {
+ // Queue is not empty
+ link.prev.store(tail, Self::R);
+ self.tail.store(link as *const _ as *mut _, Self::R);
+ tail_ref.next.store(link as *const _ as *mut _, Self::R);
+ } else {
+ // Queue is empty
+ self.tail.store(link as *const _ as *mut _, Self::R);
+ self.head.store(link as *const _ as *mut _, Self::R);
+ }
+ });
+ }
+
+ /// Check if the queue is empty.
+ pub fn is_empty(&self) -> bool {
+ self.head.load(Self::R).is_null()
+ }
+}
+
+/// A link in the linked list.
+pub struct Link<T> {
+ pub(crate) val: T,
+ next: AtomicPtr<Link<T>>,
+ prev: AtomicPtr<Link<T>>,
+ is_popped: AtomicBool,
+ _up: PhantomPinned,
+}
+
+impl<T: Clone> Link<T> {
+ const R: Ordering = Ordering::Relaxed;
+
+ /// Create a new link.
+ pub const fn new(val: T) -> Self {
+ Self {
+ val,
+ next: AtomicPtr::new(null_mut()),
+ prev: AtomicPtr::new(null_mut()),
+ is_popped: AtomicBool::new(false),
+ _up: PhantomPinned,
+ }
+ }
+
+ /// Return true if this link has been popped from the list.
+ pub fn is_popped(&self) -> bool {
+ self.is_popped.load(Self::R)
+ }
+
+ /// Remove this link from a linked list.
+ pub fn remove_from_list(&self, list: &LinkedList<T>) {
+ cs::with(|_| {
+ // Make sure all previous writes are visible
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ let prev = self.prev.load(Self::R);
+ let next = self.next.load(Self::R);
+ self.is_popped.store(true, Self::R);
+
+ match unsafe { (prev.as_ref(), next.as_ref()) } {
+ (None, None) => {
+ // Not in the list, or alone in the list; check whether the list head points at this node
+ let sp = self as *const _;
+
+ if sp == list.head.load(Self::R) {
+ list.head.store(null_mut(), Self::R);
+ list.tail.store(null_mut(), Self::R);
+ }
+ }
+ (None, Some(next_ref)) => {
+ // First in the list
+ next_ref.prev.store(null_mut(), Self::R);
+ list.head.store(next, Self::R);
+ }
+ (Some(prev_ref), None) => {
+ // Last in the list
+ prev_ref.next.store(null_mut(), Self::R);
+ list.tail.store(prev, Self::R);
+ }
+ (Some(prev_ref), Some(next_ref)) => {
+ // Somewhere in the list
+
+ // Connect the `prev.next` and `next.prev` with each other to remove the node
+ prev_ref.next.store(next, Self::R);
+ next_ref.prev.store(prev, Self::R);
+ }
+ }
+ })
+ }
+}
+
+#[cfg(test)]
+impl<T: core::fmt::Debug + Clone> LinkedList<T> {
+ fn print(&self) {
+ cs::with(|_| {
+ // Make sure all previous writes are visible
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ let mut head = self.head.load(Self::R);
+ let tail = self.tail.load(Self::R);
+
+ println!(
+ "List - h = 0x{:x}, t = 0x{:x}",
+ head as usize, tail as usize
+ );
+
+ let mut i = 0;
+
+ // SAFETY: `as_ref` is safe as `push` requires a valid reference to a link
+ while let Some(head_ref) = unsafe { head.as_ref() } {
+ println!(
+ " {}: {:?}, s = 0x{:x}, n = 0x{:x}, p = 0x{:x}",
+ i,
+ head_ref.val,
+ head as usize,
+ head_ref.next.load(Ordering::Relaxed) as usize,
+ head_ref.prev.load(Ordering::Relaxed) as usize
+ );
+
+ head = head_ref.next.load(Self::R);
+
+ i += 1;
+ }
+ });
+ }
+}
+
+#[cfg(test)]
+impl<T: core::fmt::Debug + Clone> Link<T> {
+ fn print(&self) {
+ cs::with(|_| {
+ // Make sure all previous writes are visible
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ println!("Link:");
+
+ println!(
+ " val = {:?}, n = 0x{:x}, p = 0x{:x}",
+ self.val,
+ self.next.load(Ordering::Relaxed) as usize,
+ self.prev.load(Ordering::Relaxed) as usize
+ );
+ });
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn linked_list() {
+ let wq = LinkedList::<u32>::new();
+
+ let i1 = Link::new(10);
+ let i2 = Link::new(11);
+ let i3 = Link::new(12);
+ let i4 = Link::new(13);
+ let i5 = Link::new(14);
+
+ unsafe { wq.push(Pin::new_unchecked(&i1)) };
+ unsafe { wq.push(Pin::new_unchecked(&i2)) };
+ unsafe { wq.push(Pin::new_unchecked(&i3)) };
+ unsafe { wq.push(Pin::new_unchecked(&i4)) };
+ unsafe { wq.push(Pin::new_unchecked(&i5)) };
+
+ wq.print();
+
+ wq.pop();
+ i1.print();
+
+ wq.print();
+
+ i4.remove_from_list(&wq);
+
+ wq.print();
+
+ // i1.remove_from_list(&wq);
+ // wq.print();
+
+ println!("i2");
+ i2.remove_from_list(&wq);
+ wq.print();
+
+ println!("i3");
+ i3.remove_from_list(&wq);
+ wq.print();
+
+ println!("i5");
+ i5.remove_from_list(&wq);
+ wq.print();
+ }
+}
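
For orientation, here is a hedged sketch (names and structure are my own assumptions, not code from this diff) of how the WaitQueue alias is meant to be used: a waiter stores its Waker in a pinned Link and enqueues it, while the notifier pops links in FIFO order and wakes the stored wakers. The link must stay pinned and alive until it has been popped or removed with remove_from_list.

    use core::pin::Pin;
    use core::task::Waker;

    use rtic_common::wait_queue::{Link, WaitQueue};

    static QUEUE: WaitQueue = WaitQueue::new();

    /// Wake the oldest waiter, if any (FIFO order).
    fn notify_one() {
        if let Some(waker) = QUEUE.pop() {
            waker.wake();
        }
    }

    /// Park the current task: enqueue a pinned link holding its waker.
    /// The caller must keep `link` pinned and alive until `link.is_popped()`
    /// returns true or the link has been removed via `link.remove_from_list(&QUEUE)`.
    fn park(link: Pin<&Link<Waker>>) {
        // SAFETY: the caller upholds the lifetime requirement stated above.
        unsafe { QUEUE.push(link) };
    }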
diff --git a/rtic-common/src/waker_registration.rs b/rtic-common/src/waker_registration.rs
new file mode 100644
index 0000000..174765c
--- /dev/null
+++ b/rtic-common/src/waker_registration.rs
@@ -0,0 +1,66 @@
+//! Waker registration utility.
+
+use core::cell::UnsafeCell;
+use core::task::Waker;
+
+/// A critical-section based waker registration.
+pub struct CriticalSectionWakerRegistration {
+ waker: UnsafeCell<Option<Waker>>,
+}
+
+unsafe impl Send for CriticalSectionWakerRegistration {}
+unsafe impl Sync for CriticalSectionWakerRegistration {}
+
+impl CriticalSectionWakerRegistration {
+ /// Create a new waker registration.
+ pub const fn new() -> Self {
+ Self {
+ waker: UnsafeCell::new(None),
+ }
+ }
+
+ /// Register a waker.
+ /// This will overwrite the previous waker if there was one.
+ pub fn register(&self, new_waker: &Waker) {
+ critical_section::with(|_| {
+ // SAFETY: This access is protected by the critical section.
+ let self_waker = unsafe { &mut *self.waker.get() };
+
+ // From embassy
+ // https://github.com/embassy-rs/embassy/blob/b99533607ceed225dd12ae73aaa9a0d969a7365e/embassy-sync/src/waitqueue/waker.rs#L59-L61
+ match self_waker {
+ // Optimization: If both the old and new Wakers wake the same task, we can simply
+ // keep the old waker, skipping the clone. (In most executor implementations,
+ // cloning a waker is somewhat expensive, comparable to cloning an Arc).
+ Some(ref w2) if (w2.will_wake(new_waker)) => {}
+ _ => {
+ // clone the new waker and store it
+ if let Some(old_waker) = core::mem::replace(self_waker, Some(new_waker.clone()))
+ {
+ // We had a waker registered for another task. Wake it, so the other task can
+ // reregister itself if it's still interested.
+ //
+ // If two tasks are waiting on the same thing concurrently, this will cause them
+ // to wake each other in a loop fighting over this WakerRegistration. This wastes
+ // CPU but things will still work.
+ //
+ // If the user wants to have two tasks waiting on the same thing they should use
+ // a more appropriate primitive that can store multiple wakers.
+ old_waker.wake()
+ }
+ }
+ }
+ });
+ }
+
+ /// Wake the waker.
+ pub fn wake(&self) {
+ critical_section::with(|_| {
+ // SAFETY: This access is protected by the critical section.
+ let self_waker = unsafe { &mut *self.waker.get() };
+ if let Some(waker) = self_waker.take() {
+ waker.wake()
+ }
+ });
+ }
+}
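
To illustrate the intended pattern (a sketch under my own assumptions, not code from this diff): one side registers the current task's waker in poll, and the other side, e.g. an interrupt handler, calls wake once the event has happened.

    use core::future::Future;
    use core::pin::Pin;
    use core::sync::atomic::{AtomicBool, Ordering};
    use core::task::{Context, Poll};

    use rtic_common::waker_registration::CriticalSectionWakerRegistration;

    static READY: AtomicBool = AtomicBool::new(false);
    static WAKER: CriticalSectionWakerRegistration = CriticalSectionWakerRegistration::new();

    /// Called from the event source (e.g. an interrupt handler).
    fn on_event() {
        READY.store(true, Ordering::Release);
        WAKER.wake();
    }

    /// A future that completes once the event has fired.
    struct WaitForEvent;

    impl Future for WaitForEvent {
        type Output = ();

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            // Register first, then check the flag, so a wakeup arriving between
            // the check and the registration cannot be lost.
            WAKER.register(cx.waker());

            if READY.swap(false, Ordering::Acquire) {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        }
    }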