From 71b5f9438e1beb5fe12b90415d9d6307e79c0cdf Mon Sep 17 00:00:00 2001
From: Emil Fresk
Date: Mon, 23 Jan 2023 20:57:56 +0100
Subject: Fixed systick monotonic

---
 rtic-time/src/linked_list.rs | 173 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 173 insertions(+)
 create mode 100644 rtic-time/src/linked_list.rs

diff --git a/rtic-time/src/linked_list.rs b/rtic-time/src/linked_list.rs
new file mode 100644
index 0000000..42ff8cb
--- /dev/null
+++ b/rtic-time/src/linked_list.rs
@@ -0,0 +1,173 @@
+//! ...
+
+use core::marker::PhantomPinned;
+use core::sync::atomic::{AtomicPtr, Ordering};
+use critical_section as cs;
+
+/// A sorted linked list for the timer queue.
+pub struct LinkedList<T> {
+    head: AtomicPtr<Link<T>>,
+}
+
+impl<T> LinkedList<T> {
+    /// Create a new linked list.
+    pub const fn new() -> Self {
+        Self {
+            head: AtomicPtr::new(core::ptr::null_mut()),
+        }
+    }
+}
+
+impl<T: PartialOrd + Clone> LinkedList<T> {
+    /// Pop the first element in the queue if the closure returns true.
+    pub fn pop_if<F: FnOnce(&T) -> bool>(&self, f: F) -> Option<T> {
+        cs::with(|_| {
+            // Make sure all previous writes are visible
+            core::sync::atomic::fence(Ordering::SeqCst);
+
+            let head = self.head.load(Ordering::Relaxed);
+
+            // SAFETY: `as_ref` is safe as `insert` requires a valid reference to a link
+            if let Some(head) = unsafe { head.as_ref() } {
+                if f(&head.val) {
+                    // Move head to the next element
+                    self.head
+                        .store(head.next.load(Ordering::Relaxed), Ordering::Relaxed);
+
+                    // We read the value at head
+                    let head_val = head.val.clone();
+
+                    return Some(head_val);
+                }
+            }
+            None
+        })
+    }
+
+    /// Delete a link at an address.
+    pub fn delete(&self, addr: usize) {
+        cs::with(|_| {
+            // Make sure all previous writes are visible
+            core::sync::atomic::fence(Ordering::SeqCst);
+
+            let head = self.head.load(Ordering::Relaxed);
+
+            // SAFETY: `as_ref` is safe as `insert` requires a valid reference to a link
+            let head_ref = if let Some(head_ref) = unsafe { head.as_ref() } {
+                head_ref
+            } else {
+                // 1. List is empty, do nothing
+                return;
+            };
+
+            if head as *const _ as usize == addr {
+                // 2. Replace head with head.next
+                self.head
+                    .store(head_ref.next.load(Ordering::Relaxed), Ordering::Relaxed);
+
+                return;
+            }
+
+            // 3. search list for correct node
+            let mut curr = head_ref;
+            let mut next = head_ref.next.load(Ordering::Relaxed);
+
+            // SAFETY: `as_ref` is safe as `insert` requires a valid reference to a link
+            while let Some(next_link) = unsafe { next.as_ref() } {
+                // Next is not null
+
+                if next as *const _ as usize == addr {
+                    curr.next
+                        .store(next_link.next.load(Ordering::Relaxed), Ordering::Relaxed);
+
+                    return;
+                }
+
+                // Continue searching
+                curr = next_link;
+                next = next_link.next.load(Ordering::Relaxed);
+            }
+        })
+    }
+
+    /// Insert a new link into the linked list.
+    /// The return is (was_empty, address), where the address of the link is for use with `delete`.
+    pub fn insert(&self, val: &mut Link<T>) -> (bool, usize) {
+        cs::with(|_| {
+            let addr = val as *const _ as usize;
+
+            // Make sure all previous writes are visible
+            core::sync::atomic::fence(Ordering::SeqCst);
+
+            let head = self.head.load(Ordering::Relaxed);
+
+            // 3 cases to handle
+
+            // 1. List is empty, write to head
+            // SAFETY: `as_ref` is safe as `insert` requires a valid reference to a link
+            let head_ref = if let Some(head_ref) = unsafe { head.as_ref() } {
+                head_ref
+            } else {
+                self.head.store(val, Ordering::Relaxed);
+                return (true, addr);
+            };
+
+            // 2. val needs to go in first
+            if val.val < head_ref.val {
+                // Set current head as next of `val`
+                val.next.store(head, Ordering::Relaxed);
+
+                // `val` is now first in the queue
+                self.head.store(val, Ordering::Relaxed);
+
+                return (false, addr);
+            }
+
+            // 3. search list for correct place
+            let mut curr = head_ref;
+            let mut next = head_ref.next.load(Ordering::Relaxed);
+
+            // SAFETY: `as_ref` is safe as `insert` requires a valid reference to a link
+            while let Some(next_link) = unsafe { next.as_ref() } {
+                // Next is not null
+
+                if val.val < next_link.val {
+                    // Replace next with `val`
+                    val.next.store(next, Ordering::Relaxed);
+
+                    // Insert `val`
+                    curr.next.store(val, Ordering::Relaxed);
+
+                    return (false, addr);
+                }
+
+                // Continue searching
+                curr = next_link;
+                next = next_link.next.load(Ordering::Relaxed);
+            }
+
+            // No next, write link to last position in list
+            curr.next.store(val, Ordering::Relaxed);
+
+            (false, addr)
+        })
+    }
+}
+
+/// A link in the linked list.
+pub struct Link<T> {
+    val: T,
+    next: AtomicPtr<Link<T>>,
+    _up: PhantomPinned,
+}
+
+impl<T> Link<T> {
+    /// Create a new link.
+    pub const fn new(val: T) -> Self {
+        Self {
+            val,
+            next: AtomicPtr::new(core::ptr::null_mut()),
+            _up: PhantomPinned,
+        }
+    }
+}

From 143cd136eeeb2856d06a1b83e3ef5682f720c251 Mon Sep 17 00:00:00 2001
From: Emil Fresk
Date: Tue, 24 Jan 2023 11:55:48 +0100
Subject: Optimize linked list popping so delete is not run every time

---
 rtic-time/src/linked_list.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rtic-time/src/linked_list.rs b/rtic-time/src/linked_list.rs
index 42ff8cb..52a955b 100644
--- a/rtic-time/src/linked_list.rs
+++ b/rtic-time/src/linked_list.rs
@@ -5,7 +5,7 @@ use core::sync::atomic::{AtomicPtr, Ordering};
 use critical_section as cs;
 
 /// A sorted linked list for the timer queue.
-pub struct LinkedList<T> {
+pub(crate) struct LinkedList<T> {
     head: AtomicPtr<Link<T>>,
 }
 
@@ -156,7 +156,7 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
 
 /// A link in the linked list.
 pub struct Link<T> {
-    val: T,
+    pub(crate) val: T,
     next: AtomicPtr<Link<T>>,
     _up: PhantomPinned,
 }

From 3050fc0591f087a4fbe08840c69633e89d3f58a7 Mon Sep 17 00:00:00 2001
From: Emil Fresk
Date: Sat, 28 Jan 2023 13:21:44 +0100
Subject: Use `Pin` in the linked lists

---
 rtic-time/src/linked_list.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/rtic-time/src/linked_list.rs b/rtic-time/src/linked_list.rs
index 52a955b..de5ea2a 100644
--- a/rtic-time/src/linked_list.rs
+++ b/rtic-time/src/linked_list.rs
@@ -1,6 +1,7 @@
 //! ...
 
 use core::marker::PhantomPinned;
+use core::pin::Pin;
 use core::sync::atomic::{AtomicPtr, Ordering};
 use critical_section as cs;
 
@@ -92,8 +93,10 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
 
     /// Insert a new link into the linked list.
     /// The return is (was_empty, address), where the address of the link is for use with `delete`.
-    pub fn insert(&self, val: &mut Link<T>) -> (bool, usize) {
+    pub fn insert(&self, val: Pin<&mut Link<T>>) -> (bool, usize) {
         cs::with(|_| {
+            // SAFETY: This datastructure does not move the underlying value.
+            let val = unsafe { val.get_unchecked_mut() };
             let addr = val as *const _ as usize;
 
             // Make sure all previous writes are visible

From 1cda61fbda205920517f7b63af90c97c38ff9af6 Mon Sep 17 00:00:00 2001
From: Emil Fresk
Date: Sat, 18 Feb 2023 09:43:06 +0100
Subject: Make some linked list operations unsafe, and document their safety at usage

---
 rtic-time/src/linked_list.rs | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/rtic-time/src/linked_list.rs b/rtic-time/src/linked_list.rs
index de5ea2a..d4256c9 100644
--- a/rtic-time/src/linked_list.rs
+++ b/rtic-time/src/linked_list.rs
@@ -93,10 +93,12 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
 
     /// Insert a new link into the linked list.
     /// The return is (was_empty, address), where the address of the link is for use with `delete`.
-    pub fn insert(&self, val: Pin<&mut Link<T>>) -> (bool, usize) {
+    ///
+    /// SAFETY: The pinned link must live until it is removed from this list.
+    pub unsafe fn insert(&self, val: Pin<&Link<T>>) -> (bool, usize) {
         cs::with(|_| {
             // SAFETY: This datastructure does not move the underlying value.
-            let val = unsafe { val.get_unchecked_mut() };
+            let val = val.get_ref();
             let addr = val as *const _ as usize;
 
             // Make sure all previous writes are visible
@@ -111,7 +113,8 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
             let head_ref = if let Some(head_ref) = unsafe { head.as_ref() } {
                 head_ref
             } else {
-                self.head.store(val, Ordering::Relaxed);
+                self.head
+                    .store(val as *const _ as *mut _, Ordering::Relaxed);
                 return (true, addr);
             };
 
@@ -121,7 +124,8 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
                 val.next.store(head, Ordering::Relaxed);
 
                 // `val` is now first in the queue
-                self.head.store(val, Ordering::Relaxed);
+                self.head
+                    .store(val as *const _ as *mut _, Ordering::Relaxed);
 
                 return (false, addr);
             }
@@ -139,7 +143,8 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
                     val.next.store(next, Ordering::Relaxed);
 
                     // Insert `val`
-                    curr.next.store(val, Ordering::Relaxed);
+                    curr.next
+                        .store(val as *const _ as *mut _, Ordering::Relaxed);
 
                     return (false, addr);
                 }
@@ -150,7 +155,8 @@ impl<T: PartialOrd + Clone> LinkedList<T> {
             }
 
             // No next, write link to last position in list
-            curr.next.store(val, Ordering::Relaxed);
+            curr.next
+                .store(val as *const _ as *mut _, Ordering::Relaxed);
 
             (false, addr)
         })
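
For reference, the sketch below shows one way the API could be driven after all four patches: a link is pinned, inserted with the now-unsafe `insert`, and then either popped via `pop_if` or unlinked via `delete`. It is not part of the patches and not the real caller (that is the timer queue in rtic-time): the `usage_sketch` name, the `u32` payload, the `core::pin::pin!` stack pinning, and the `<= 100` expiry check are illustrative assumptions, and running it additionally requires a `critical-section` implementation to back `cs::with`.

// A hypothetical crate-internal caller; `LinkedList` and `Link` are the types
// defined in rtic-time/src/linked_list.rs.
fn usage_sketch() {
    use core::pin::pin;

    let list: LinkedList<u32> = LinkedList::new();

    // The link must stay pinned and must not be dropped while it is in the list.
    let link = pin!(Link::new(42));

    // SAFETY: `link` outlives its membership in `list`; it is either popped or
    // deleted below, before it goes out of scope.
    let (_was_empty, addr) = unsafe { list.insert(link.as_ref()) };

    // Pop the head only if the caller-supplied check accepts it,
    // e.g. "has this release instant passed?".
    let popped = list.pop_if(|val| *val <= 100);

    // Only unlink by address when the value was not popped, e.g. when the
    // waiting task is dropped instead of released (cf. the second patch,
    // which lets the caller skip `delete` when it is not needed).
    if popped.is_none() {
        list.delete(addr);
    }
}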