From 7614b96fe45240dafe91ae549e712b560e2d4c10 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sat, 31 Dec 2022 14:45:13 +0100 Subject: RTIC v2: Initial commit rtic-syntax is now part of RTIC repository --- src/lib.rs | 127 +++++-------------------------------------------------------- 1 file changed, 9 insertions(+), 118 deletions(-) (limited to 'src') diff --git a/src/lib.rs b/src/lib.rs index 0c0d0cc..7d12d9a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,123 +1,14 @@ -//! Real-Time Interrupt-driven Concurrency (RTIC) framework for ARM Cortex-M microcontrollers. -//! -//! **IMPORTANT**: This crate is published as [`cortex-m-rtic`] on crates.io but the name of the -//! library is `rtic`. -//! -//! [`cortex-m-rtic`]: https://crates.io/crates/cortex-m-rtic -//! -//! The user level documentation can be found [here]. -//! -//! [here]: https://rtic.rs -//! -//! Don't forget to check the documentation of the `#[app]` attribute (listed under the reexports -//! section), which is the main component of the framework. -//! -//! # Minimum Supported Rust Version (MSRV) -//! -//! This crate is compiled and tested with the latest toolchain (rolling) as of the release date. -//! If you run into compilation errors, try the latest stable release of the rust toolchain. -//! -//! # Semantic Versioning -//! -//! Like the Rust project, this crate adheres to [SemVer]: breaking changes in the API and semantics -//! require a *semver bump* (since 1.0.0 a new major version release), with the exception of breaking changes -//! that fix soundness issues -- those are considered bug fixes and can be landed in a new patch -//! release. -//! -//! [SemVer]: https://semver.org/spec/v2.0.0.html - -#![deny(missing_docs)] -#![deny(rust_2021_compatibility)] -#![deny(rust_2018_compatibility)] -#![deny(rust_2018_idioms)] -#![no_std] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg", - html_favicon_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg" -)] -//deny_warnings_placeholder_for_ci -#![allow(clippy::inline_always)] - -use cortex_m::{interrupt::InterruptNumber, peripheral::NVIC}; -pub use cortex_m_rtic_macros::app; -pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex}; -pub use rtic_monotonic::{self, Monotonic}; - -/// module `mutex::prelude` provides `Mutex` and multi-lock variants. Recommended over `mutex_prelude` -pub mod mutex { - pub use rtic_core::prelude; - pub use rtic_core::Mutex; -} - -#[doc(hidden)] -pub mod export; -#[doc(hidden)] -mod tq; - -/// Sets the given `interrupt` as pending -/// -/// This is a convenience function around -/// [`NVIC::pend`](../cortex_m/peripheral/struct.NVIC.html#method.pend) -pub fn pend(interrupt: I) -where - I: InterruptNumber, -{ - NVIC::pend(interrupt); +pub fn add(left: usize, right: usize) -> usize { + left + right } -use core::cell::UnsafeCell; +#[cfg(test)] +mod tests { + use super::*; -/// Internal replacement for `static mut T` -/// -/// Used to represent RTIC Resources -/// -/// Soundness: -/// 1) Unsafe API for internal use only -/// 2) ``get_mut(&self) -> *mut T`` -/// returns a raw mutable pointer to the inner T -/// casting to &mut T is under control of RTIC -/// RTIC ensures &mut T to be unique under Rust aliasing rules. 
-/// -/// Implementation uses the underlying ``UnsafeCell`` -/// self.0.get() -> *mut T -/// -/// 3) get(&self) -> *const T -/// returns a raw immutable (const) pointer to the inner T -/// casting to &T is under control of RTIC -/// RTIC ensures &T to be shared under Rust aliasing rules. -/// -/// Implementation uses the underlying ``UnsafeCell`` -/// self.0.get() -> *mut T, demoted to *const T -/// -#[repr(transparent)] -pub struct RacyCell(UnsafeCell); - -impl RacyCell { - /// Create a ``RacyCell`` - #[inline(always)] - pub const fn new(value: T) -> Self { - RacyCell(UnsafeCell::new(value)) - } - - /// Get `*mut T` - /// - /// # Safety - /// - /// See documentation notes for [`RacyCell`] - #[inline(always)] - pub unsafe fn get_mut(&self) -> *mut T { - self.0.get() - } - - /// Get `*const T` - /// - /// # Safety - /// - /// See documentation notes for [`RacyCell`] - #[inline(always)] - pub unsafe fn get(&self) -> *const T { - self.0.get() + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); } } - -unsafe impl Sync for RacyCell {} -- cgit v1.2.3 From 582c602912592ec7ebea3096aefa02aea99c2143 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 2 Jan 2023 14:34:05 +0100 Subject: Old xtask test pass --- src/export.rs | 134 ++++++++++++++++++- src/lib.rs | 129 ++++++++++++++++-- src/sll.rs | 421 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ src/tq.rs | 275 +++++++++++++++++++++++++++++--------- 4 files changed, 883 insertions(+), 76 deletions(-) create mode 100644 src/sll.rs (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 6f2a1b6..da4a691 100644 --- a/src/export.rs +++ b/src/export.rs @@ -1,11 +1,13 @@ #![allow(clippy::inline_always)] +pub use crate::{ + sll::{IntrusiveSortedLinkedList, Node as IntrusiveNode}, + tq::{TaskNotReady, TimerQueue, WakerNotReady}, +}; +pub use bare_metal::CriticalSection; use core::{ cell::Cell, sync::atomic::{AtomicBool, Ordering}, }; - -pub use crate::tq::{NotReady, TimerQueue}; -pub use bare_metal::CriticalSection; pub use cortex_m::{ asm::nop, asm::wfi, @@ -16,10 +18,134 @@ pub use cortex_m::{ pub use heapless::sorted_linked_list::SortedLinkedList; pub use heapless::spsc::Queue; pub use heapless::BinaryHeap; +pub use heapless::Vec; pub use rtic_monotonic as monotonic; +pub mod idle_executor { + use core::{ + future::Future, + pin::Pin, + task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, + }; + + fn no_op(_: *const ()) {} + fn no_op_clone(_: *const ()) -> RawWaker { + noop_raw_waker() + } + + static IDLE_WAKER_TABLE: RawWakerVTable = RawWakerVTable::new(no_op_clone, no_op, no_op, no_op); + + #[inline] + fn noop_raw_waker() -> RawWaker { + RawWaker::new(core::ptr::null(), &IDLE_WAKER_TABLE) + } + + pub struct IdleExecutor + where + T: Future, + { + idle: T, + } + + impl IdleExecutor + where + T: Future, + { + #[inline(always)] + pub fn new(idle: T) -> Self { + Self { idle } + } + + #[inline(always)] + pub fn run(&mut self) -> ! { + let w = unsafe { Waker::from_raw(noop_raw_waker()) }; + let mut ctxt = Context::from_waker(&w); + loop { + match unsafe { Pin::new_unchecked(&mut self.idle) }.poll(&mut ctxt) { + Poll::Pending => { + // All ok! 
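+                        // `Pending` is the expected steady state: the idle
+                        // future never completes, and with a no-op waker
+                        // nothing reschedules it, so we simply poll again.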
+ } + Poll::Ready(_) => { + // The idle executor will never return + unreachable!() + } + } + } + } + } +} + +pub mod executor { + use core::{ + future::Future, + mem, + pin::Pin, + task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, + }; + + static WAKER_VTABLE: RawWakerVTable = + RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop); + + unsafe fn waker_clone(p: *const ()) -> RawWaker { + RawWaker::new(p, &WAKER_VTABLE) + } + + unsafe fn waker_wake(p: *const ()) { + // The only thing we need from a waker is the function to call to pend the async + // dispatcher. + let f: fn() = mem::transmute(p); + f(); + } + + unsafe fn waker_drop(_: *const ()) { + // nop + } + + //============ + // AsyncTaskExecutor + + pub struct AsyncTaskExecutor { + task: Option, + } + + impl AsyncTaskExecutor { + pub const fn new() -> Self { + Self { task: None } + } + + pub fn is_running(&self) -> bool { + self.task.is_some() + } + + pub fn spawn(&mut self, future: F) { + self.task = Some(future); + } + + pub fn poll(&mut self, wake: fn()) -> bool { + if let Some(future) = &mut self.task { + unsafe { + let waker = Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)); + let mut cx = Context::from_waker(&waker); + let future = Pin::new_unchecked(future); + + match future.poll(&mut cx) { + Poll::Ready(_) => { + self.task = None; + true // Only true if we finished now + } + Poll::Pending => false, + } + } + } else { + false + } + } + } +} + pub type SCFQ = Queue; pub type SCRQ = Queue<(T, u8), N>; +pub type ASYNCRQ = Queue; /// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23). /// It needs to be large enough to cover all the relevant interrupts in use. @@ -117,7 +243,7 @@ impl Priority { /// /// Will overwrite the current Priority #[inline(always)] - pub unsafe fn new(value: u8) -> Self { + pub const unsafe fn new(value: u8) -> Self { Priority { inner: Cell::new(value), } diff --git a/src/lib.rs b/src/lib.rs index 7d12d9a..da556a5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,14 +1,125 @@ -pub fn add(left: usize, right: usize) -> usize { - left + right +//! Real-Time Interrupt-driven Concurrency (RTIC) framework for ARM Cortex-M microcontrollers. +//! +//! **IMPORTANT**: This crate is published as [`cortex-m-rtic`] on crates.io but the name of the +//! library is `rtic`. +//! +//! [`cortex-m-rtic`]: https://crates.io/crates/cortex-m-rtic +//! +//! The user level documentation can be found [here]. +//! +//! [here]: https://rtic.rs +//! +//! Don't forget to check the documentation of the `#[app]` attribute (listed under the reexports +//! section), which is the main component of the framework. +//! +//! # Minimum Supported Rust Version (MSRV) +//! +//! This crate is compiled and tested with the latest toolchain (rolling) as of the release date. +//! If you run into compilation errors, try the latest stable release of the rust toolchain. +//! +//! # Semantic Versioning +//! +//! Like the Rust project, this crate adheres to [SemVer]: breaking changes in the API and semantics +//! require a *semver bump* (since 1.0.0 a new major version release), with the exception of breaking changes +//! that fix soundness issues -- those are considered bug fixes and can be landed in a new patch +//! release. +//! +//! 
[SemVer]: https://semver.org/spec/v2.0.0.html + +#![deny(missing_docs)] +#![deny(rust_2021_compatibility)] +#![deny(rust_2018_compatibility)] +#![deny(rust_2018_idioms)] +#![no_std] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg", + html_favicon_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg" +)] +//deny_warnings_placeholder_for_ci +#![allow(clippy::inline_always)] + +use cortex_m::{interrupt::InterruptNumber, peripheral::NVIC}; +pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex}; +pub use rtic_macros::app; +pub use rtic_monotonic::{self, Monotonic}; + +/// module `mutex::prelude` provides `Mutex` and multi-lock variants. Recommended over `mutex_prelude` +pub mod mutex { + pub use rtic_core::prelude; + pub use rtic_core::Mutex; +} + +#[doc(hidden)] +pub mod export; +#[doc(hidden)] +pub mod sll; +#[doc(hidden)] +mod tq; + +/// Sets the given `interrupt` as pending +/// +/// This is a convenience function around +/// [`NVIC::pend`](../cortex_m/peripheral/struct.NVIC.html#method.pend) +pub fn pend(interrupt: I) +where + I: InterruptNumber, +{ + NVIC::pend(interrupt); } -#[cfg(test)] -mod tests { - use super::*; +use core::cell::UnsafeCell; - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); +/// Internal replacement for `static mut T` +/// +/// Used to represent RTIC Resources +/// +/// Soundness: +/// 1) Unsafe API for internal use only +/// 2) ``get_mut(&self) -> *mut T`` +/// returns a raw mutable pointer to the inner T +/// casting to &mut T is under control of RTIC +/// RTIC ensures &mut T to be unique under Rust aliasing rules. +/// +/// Implementation uses the underlying ``UnsafeCell`` +/// self.0.get() -> *mut T +/// +/// 3) get(&self) -> *const T +/// returns a raw immutable (const) pointer to the inner T +/// casting to &T is under control of RTIC +/// RTIC ensures &T to be shared under Rust aliasing rules. +/// +/// Implementation uses the underlying ``UnsafeCell`` +/// self.0.get() -> *mut T, demoted to *const T +/// +#[repr(transparent)] +pub struct RacyCell(UnsafeCell); + +impl RacyCell { + /// Create a ``RacyCell`` + #[inline(always)] + pub const fn new(value: T) -> Self { + RacyCell(UnsafeCell::new(value)) + } + + /// Get `*mut T` + /// + /// # Safety + /// + /// See documentation notes for [`RacyCell`] + #[inline(always)] + pub unsafe fn get_mut(&self) -> *mut T { + self.0.get() + } + + /// Get `*const T` + /// + /// # Safety + /// + /// See documentation notes for [`RacyCell`] + #[inline(always)] + pub unsafe fn get(&self) -> *const T { + self.0.get() } } + +unsafe impl Sync for RacyCell {} diff --git a/src/sll.rs b/src/sll.rs new file mode 100644 index 0000000..43b53c1 --- /dev/null +++ b/src/sll.rs @@ -0,0 +1,421 @@ +//! An intrusive sorted priority linked list, designed for use in `Future`s in RTIC. +use core::cmp::Ordering; +use core::fmt; +use core::marker::PhantomData; +use core::ops::{Deref, DerefMut}; +use core::ptr::NonNull; + +/// Marker for Min sorted [`IntrusiveSortedLinkedList`]. +pub struct Min; + +/// Marker for Max sorted [`IntrusiveSortedLinkedList`]. 
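+/// A max-sorted list keeps its largest value at the head.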
+pub struct Max; + +/// The linked list kind: min-list or max-list +pub trait Kind: private::Sealed { + #[doc(hidden)] + fn ordering() -> Ordering; +} + +impl Kind for Min { + fn ordering() -> Ordering { + Ordering::Less + } +} + +impl Kind for Max { + fn ordering() -> Ordering { + Ordering::Greater + } +} + +/// Sealed traits +mod private { + pub trait Sealed {} +} + +impl private::Sealed for Max {} +impl private::Sealed for Min {} + +/// A node in the [`IntrusiveSortedLinkedList`]. +pub struct Node { + pub val: T, + next: Option>>, +} + +impl Node { + pub fn new(val: T) -> Self { + Self { val, next: None } + } +} + +/// The linked list. +pub struct IntrusiveSortedLinkedList<'a, T, K> { + head: Option>>, + _kind: PhantomData, + _lt: PhantomData<&'a ()>, +} + +impl<'a, T, K> fmt::Debug for IntrusiveSortedLinkedList<'a, T, K> +where + T: Ord + core::fmt::Debug, + K: Kind, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut l = f.debug_list(); + let mut current = self.head; + + while let Some(head) = current { + let head = unsafe { head.as_ref() }; + current = head.next; + + l.entry(&head.val); + } + + l.finish() + } +} + +impl<'a, T, K> IntrusiveSortedLinkedList<'a, T, K> +where + T: Ord, + K: Kind, +{ + pub const fn new() -> Self { + Self { + head: None, + _kind: PhantomData, + _lt: PhantomData, + } + } + + // Push to the list. + pub fn push(&mut self, new: &'a mut Node) { + unsafe { + if let Some(head) = self.head { + if head.as_ref().val.cmp(&new.val) != K::ordering() { + // This is newer than head, replace head + new.next = self.head; + self.head = Some(NonNull::new_unchecked(new)); + } else { + // It's not head, search the list for the correct placement + let mut current = head; + + while let Some(next) = current.as_ref().next { + if next.as_ref().val.cmp(&new.val) != K::ordering() { + break; + } + + current = next; + } + + new.next = current.as_ref().next; + current.as_mut().next = Some(NonNull::new_unchecked(new)); + } + } else { + // List is empty, place at head + self.head = Some(NonNull::new_unchecked(new)) + } + } + } + + /// Get an iterator over the sorted list. + pub fn iter(&self) -> Iter<'_, T, K> { + Iter { + _list: self, + index: self.head, + } + } + + /// Find an element in the list that can be changed and resorted. + pub fn find_mut(&mut self, mut f: F) -> Option> + where + F: FnMut(&T) -> bool, + { + let head = self.head?; + + // Special-case, first element + if f(&unsafe { head.as_ref() }.val) { + return Some(FindMut { + is_head: true, + prev_index: None, + index: self.head, + list: self, + maybe_changed: false, + }); + } + + let mut current = head; + + while let Some(next) = unsafe { current.as_ref() }.next { + if f(&unsafe { next.as_ref() }.val) { + return Some(FindMut { + is_head: false, + prev_index: Some(current), + index: Some(next), + list: self, + maybe_changed: false, + }); + } + + current = next; + } + + None + } + + /// Peek at the first element. + pub fn peek(&self) -> Option<&T> { + self.head.map(|head| unsafe { &head.as_ref().val }) + } + + /// Pops the first element in the list. + /// + /// Complexity is worst-case `O(1)`. + pub fn pop(&mut self) -> Option<&'a Node> { + if let Some(head) = self.head { + let v = unsafe { head.as_ref() }; + self.head = v.next; + Some(v) + } else { + None + } + } + + /// Checks if the linked list is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.head.is_none() + } +} + +/// Iterator for the linked list. 
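+/// Created by [`IntrusiveSortedLinkedList::iter`]; yields `&T` in sorted
+/// order, starting from the head of the list.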
+pub struct Iter<'a, T, K> +where + T: Ord, + K: Kind, +{ + _list: &'a IntrusiveSortedLinkedList<'a, T, K>, + index: Option>>, +} + +impl<'a, T, K> Iterator for Iter<'a, T, K> +where + T: Ord, + K: Kind, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + let index = self.index?; + + let node = unsafe { index.as_ref() }; + self.index = node.next; + + Some(&node.val) + } +} + +/// Comes from [`IntrusiveSortedLinkedList::find_mut`]. +pub struct FindMut<'a, 'b, T, K> +where + T: Ord + 'b, + K: Kind, +{ + list: &'a mut IntrusiveSortedLinkedList<'b, T, K>, + is_head: bool, + prev_index: Option>>, + index: Option>>, + maybe_changed: bool, +} + +impl<'a, 'b, T, K> FindMut<'a, 'b, T, K> +where + T: Ord, + K: Kind, +{ + unsafe fn pop_internal(&mut self) -> &'b mut Node { + if self.is_head { + // If it is the head element, we can do a normal pop + let mut head = self.list.head.unwrap_unchecked(); + let v = head.as_mut(); + self.list.head = v.next; + v + } else { + // Somewhere in the list + let mut prev = self.prev_index.unwrap_unchecked(); + let mut curr = self.index.unwrap_unchecked(); + + // Re-point the previous index + prev.as_mut().next = curr.as_ref().next; + + curr.as_mut() + } + } + + /// This will pop the element from the list. + /// + /// Complexity is worst-case `O(1)`. + #[inline] + pub fn pop(mut self) -> &'b mut Node { + unsafe { self.pop_internal() } + } + + /// This will resort the element into the correct position in the list if needed. The resorting + /// will only happen if the element has been accessed mutably. + /// + /// Same as calling `drop`. + /// + /// Complexity is worst-case `O(N)`. + #[inline] + pub fn finish(self) { + drop(self) + } +} + +impl<'b, T, K> Drop for FindMut<'_, 'b, T, K> +where + T: Ord + 'b, + K: Kind, +{ + fn drop(&mut self) { + // Only resort the list if the element has changed + if self.maybe_changed { + unsafe { + let val = self.pop_internal(); + self.list.push(val); + } + } + } +} + +impl Deref for FindMut<'_, '_, T, K> +where + T: Ord, + K: Kind, +{ + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { &self.index.unwrap_unchecked().as_ref().val } + } +} + +impl DerefMut for FindMut<'_, '_, T, K> +where + T: Ord, + K: Kind, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + self.maybe_changed = true; + unsafe { &mut self.index.unwrap_unchecked().as_mut().val } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn const_new() { + static mut _V1: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + } + + #[test] + fn test_peek() { + let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + + let mut a = Node { val: 1, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &1); + + let mut a = Node { val: 2, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &2); + + let mut a = Node { val: 3, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &3); + + let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + + let mut a = Node { val: 2, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &2); + + let mut a = Node { val: 1, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &1); + + let mut a = Node { val: 3, next: None }; + ll.push(&mut a); + assert_eq!(ll.peek().unwrap(), &1); + } + + #[test] + fn test_empty() { + let ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + + assert!(ll.is_empty()) + } + + #[test] + fn test_updating() { + let mut ll: IntrusiveSortedLinkedList = 
IntrusiveSortedLinkedList::new(); + + let mut a = Node { val: 1, next: None }; + ll.push(&mut a); + + let mut a = Node { val: 2, next: None }; + ll.push(&mut a); + + let mut a = Node { val: 3, next: None }; + ll.push(&mut a); + + let mut find = ll.find_mut(|v| *v == 2).unwrap(); + + *find += 1000; + find.finish(); + + assert_eq!(ll.peek().unwrap(), &1002); + + let mut find = ll.find_mut(|v| *v == 3).unwrap(); + + *find += 1000; + find.finish(); + + assert_eq!(ll.peek().unwrap(), &1003); + + // Remove largest element + ll.find_mut(|v| *v == 1003).unwrap().pop(); + + assert_eq!(ll.peek().unwrap(), &1002); + } + + #[test] + fn test_updating_1() { + let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + + let mut a = Node { val: 1, next: None }; + ll.push(&mut a); + + let v = ll.pop().unwrap(); + + assert_eq!(v.val, 1); + } + + #[test] + fn test_updating_2() { + let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); + + let mut a = Node { val: 1, next: None }; + ll.push(&mut a); + + let mut find = ll.find_mut(|v| *v == 1).unwrap(); + + *find += 1000; + find.finish(); + + assert_eq!(ll.peek().unwrap(), &1001); + } +} diff --git a/src/tq.rs b/src/tq.rs index 0f585ba..daa91c8 100644 --- a/src/tq.rs +++ b/src/tq.rs @@ -1,29 +1,28 @@ -use crate::Monotonic; +use crate::{ + sll::{IntrusiveSortedLinkedList, Min as IsslMin, Node as IntrusiveNode}, + Monotonic, +}; use core::cmp::Ordering; -use heapless::sorted_linked_list::{LinkedIndexU16, Min, SortedLinkedList}; +use core::task::Waker; +use heapless::sorted_linked_list::{LinkedIndexU16, Min as SllMin, SortedLinkedList}; -pub struct TimerQueue( - pub SortedLinkedList, LinkedIndexU16, Min, N>, -) +pub struct TimerQueue<'a, Mono, Task, const N_TASK: usize> where Mono: Monotonic, - Task: Copy; + Task: Copy, +{ + pub task_queue: SortedLinkedList, LinkedIndexU16, SllMin, N_TASK>, + pub waker_queue: IntrusiveSortedLinkedList<'a, WakerNotReady, IsslMin>, +} -impl TimerQueue +impl<'a, Mono, Task, const N_TASK: usize> TimerQueue<'a, Mono, Task, N_TASK> where - Mono: Monotonic, + Mono: Monotonic + 'a, Task: Copy, { - /// # Safety - /// - /// Writing to memory with a transmute in order to enable - /// interrupts of the ``SysTick`` timer - /// - /// Enqueue a task without checking if it is full - #[inline] - pub unsafe fn enqueue_unchecked( - &mut self, - nr: NotReady, + fn check_if_enable( + &self, + instant: Mono::Instant, enable_interrupt: F1, pend_handler: F2, mono: Option<&mut Mono>, @@ -33,11 +32,17 @@ where { // Check if the top contains a non-empty element and if that element is // greater than nr - let if_heap_max_greater_than_nr = - self.0.peek().map_or(true, |head| nr.instant < head.instant); + let if_task_heap_max_greater_than_nr = self + .task_queue + .peek() + .map_or(true, |head| instant < head.instant); + let if_waker_heap_max_greater_than_nr = self + .waker_queue + .peek() + .map_or(true, |head| instant < head.instant); - if if_heap_max_greater_than_nr { - if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE && self.0.is_empty() { + if if_task_heap_max_greater_than_nr || if_waker_heap_max_greater_than_nr { + if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE && self.is_empty() { if let Some(mono) = mono { mono.enable_timer(); } @@ -46,19 +51,49 @@ where pend_handler(); } + } - self.0.push_unchecked(nr); + /// Enqueue a task without checking if it is full + #[inline] + pub unsafe fn enqueue_task_unchecked( + &mut self, + nr: TaskNotReady, + enable_interrupt: F1, + pend_handler: F2, + mono: Option<&mut Mono>, + ) where + F1: 
FnOnce(), + F2: FnOnce(), + { + self.check_if_enable(nr.instant, enable_interrupt, pend_handler, mono); + self.task_queue.push_unchecked(nr); } - /// Check if the timer queue is empty. + /// Enqueue a waker + #[inline] + pub fn enqueue_waker( + &mut self, + nr: &'a mut IntrusiveNode>, + enable_interrupt: F1, + pend_handler: F2, + mono: Option<&mut Mono>, + ) where + F1: FnOnce(), + F2: FnOnce(), + { + self.check_if_enable(nr.val.instant, enable_interrupt, pend_handler, mono); + self.waker_queue.push(nr); + } + + /// Check if all the timer queue is empty. #[inline] pub fn is_empty(&self) -> bool { - self.0.is_empty() + self.task_queue.is_empty() && self.waker_queue.is_empty() } - /// Cancel the marker value - pub fn cancel_marker(&mut self, marker: u32) -> Option<(Task, u8)> { - if let Some(val) = self.0.find_mut(|nr| nr.marker == marker) { + /// Cancel the marker value for a task + pub fn cancel_task_marker(&mut self, marker: u32) -> Option<(Task, u8)> { + if let Some(val) = self.task_queue.find_mut(|nr| nr.marker == marker) { let nr = val.pop(); Some((nr.task, nr.index)) @@ -67,16 +102,23 @@ where } } - /// Update the instant at an marker value to a new instant + /// Cancel the marker value for a waker + pub fn cancel_waker_marker(&mut self, marker: u32) { + if let Some(val) = self.waker_queue.find_mut(|nr| nr.marker == marker) { + let _ = val.pop(); + } + } + + /// Update the instant at an marker value for a task to a new instant #[allow(clippy::result_unit_err)] - pub fn update_marker( + pub fn update_task_marker( &mut self, marker: u32, new_marker: u32, instant: Mono::Instant, pend_handler: F, ) -> Result<(), ()> { - if let Some(mut val) = self.0.find_mut(|nr| nr.marker == marker) { + if let Some(mut val) = self.task_queue.find_mut(|nr| nr.marker == marker) { val.instant = instant; val.marker = new_marker; @@ -89,6 +131,62 @@ where } } + fn dequeue_task_queue( + &mut self, + instant: Mono::Instant, + mono: &mut Mono, + ) -> Option<(Task, u8)> { + if instant <= mono.now() { + // task became ready + let nr = unsafe { self.task_queue.pop_unchecked() }; + Some((nr.task, nr.index)) + } else { + // Set compare + mono.set_compare(instant); + + // Double check that the instant we set is really in the future, else + // dequeue. If the monotonic is fast enough it can happen that from the + // read of now to the set of the compare, the time can overflow. This is to + // guard against this. + if instant <= mono.now() { + let nr = unsafe { self.task_queue.pop_unchecked() }; + Some((nr.task, nr.index)) + } else { + None + } + } + } + + fn dequeue_waker_queue(&mut self, instant: Mono::Instant, mono: &mut Mono) -> bool { + let mut did_wake = false; + + if instant <= mono.now() { + // Task became ready, wake the waker + if let Some(v) = self.waker_queue.pop() { + v.val.waker.wake_by_ref(); + + did_wake = true; + } + } else { + // Set compare + mono.set_compare(instant); + + // Double check that the instant we set is really in the future, else + // dequeue. If the monotonic is fast enough it can happen that from the + // read of now to the set of the compare, the time can overflow. This is to + // guard against this. 
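+            // Without the re-check, a deadline that had already passed when
+            // the compare value was written might never raise a compare-match
+            // interrupt, leaving the waker stranded in the queue.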
+ if instant <= mono.now() { + if let Some(v) = self.waker_queue.pop() { + v.val.waker.wake_by_ref(); + + did_wake = true; + } + } + } + + did_wake + } + /// Dequeue a task from the ``TimerQueue`` pub fn dequeue(&mut self, disable_interrupt: F, mono: &mut Mono) -> Option<(Task, u8)> where @@ -96,59 +194,72 @@ where { mono.clear_compare_flag(); - if let Some(instant) = self.0.peek().map(|p| p.instant) { - if instant <= mono.now() { - // task became ready - let nr = unsafe { self.0.pop_unchecked() }; + loop { + let tq = self.task_queue.peek().map(|p| p.instant); + let wq = self.waker_queue.peek().map(|p| p.instant); - Some((nr.task, nr.index)) - } else { - // Set compare - mono.set_compare(instant); - - // Double check that the instant we set is really in the future, else - // dequeue. If the monotonic is fast enough it can happen that from the - // read of now to the set of the compare, the time can overflow. This is to - // guard against this. - if instant <= mono.now() { - let nr = unsafe { self.0.pop_unchecked() }; - - Some((nr.task, nr.index)) - } else { - None + let dequeue_task; + let instant; + + match (tq, wq) { + (Some(tq_instant), Some(wq_instant)) => { + if tq_instant <= wq_instant { + dequeue_task = true; + instant = tq_instant; + } else { + dequeue_task = false; + instant = wq_instant; + } + } + (Some(tq_instant), None) => { + dequeue_task = true; + instant = tq_instant; + } + (None, Some(wq_instant)) => { + dequeue_task = false; + instant = wq_instant; + } + (None, None) => { + // The queue is empty, disable the interrupt. + if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE { + disable_interrupt(); + mono.disable_timer(); + } + + return None; } - } - } else { - // The queue is empty, disable the interrupt. - if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE { - disable_interrupt(); - mono.disable_timer(); } - None + if dequeue_task { + return self.dequeue_task_queue(instant, mono); + } else if !self.dequeue_waker_queue(instant, mono) { + return None; + } else { + // Run the dequeue again + } } } } -pub struct NotReady +pub struct TaskNotReady where Task: Copy, Mono: Monotonic, { + pub task: Task, pub index: u8, pub instant: Mono::Instant, - pub task: Task, pub marker: u32, } -impl Eq for NotReady +impl Eq for TaskNotReady where Task: Copy, Mono: Monotonic, { } -impl Ord for NotReady +impl Ord for TaskNotReady where Task: Copy, Mono: Monotonic, @@ -158,7 +269,7 @@ where } } -impl PartialEq for NotReady +impl PartialEq for TaskNotReady where Task: Copy, Mono: Monotonic, @@ -168,7 +279,7 @@ where } } -impl PartialOrd for NotReady +impl PartialOrd for TaskNotReady where Task: Copy, Mono: Monotonic, @@ -177,3 +288,41 @@ where Some(self.cmp(other)) } } + +pub struct WakerNotReady +where + Mono: Monotonic, +{ + pub waker: Waker, + pub instant: Mono::Instant, + pub marker: u32, +} + +impl Eq for WakerNotReady where Mono: Monotonic {} + +impl Ord for WakerNotReady +where + Mono: Monotonic, +{ + fn cmp(&self, other: &Self) -> Ordering { + self.instant.cmp(&other.instant) + } +} + +impl PartialEq for WakerNotReady +where + Mono: Monotonic, +{ + fn eq(&self, other: &Self) -> bool { + self.instant == other.instant + } +} + +impl PartialOrd for WakerNotReady +where + Mono: Monotonic, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} -- cgit v1.2.3 From 858320cbfc391a74bff6b9c8a0b3c7696a232b76 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Wed, 4 Jan 2023 21:08:44 +0100 Subject: Even more cleanup --- src/export.rs | 70 
----------------------------------------------------------- 1 file changed, 70 deletions(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index da4a691..82320fb 100644 --- a/src/export.rs +++ b/src/export.rs @@ -15,65 +15,6 @@ pub use cortex_m::{ peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST}, Peripherals, }; -pub use heapless::sorted_linked_list::SortedLinkedList; -pub use heapless::spsc::Queue; -pub use heapless::BinaryHeap; -pub use heapless::Vec; -pub use rtic_monotonic as monotonic; - -pub mod idle_executor { - use core::{ - future::Future, - pin::Pin, - task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, - }; - - fn no_op(_: *const ()) {} - fn no_op_clone(_: *const ()) -> RawWaker { - noop_raw_waker() - } - - static IDLE_WAKER_TABLE: RawWakerVTable = RawWakerVTable::new(no_op_clone, no_op, no_op, no_op); - - #[inline] - fn noop_raw_waker() -> RawWaker { - RawWaker::new(core::ptr::null(), &IDLE_WAKER_TABLE) - } - - pub struct IdleExecutor - where - T: Future, - { - idle: T, - } - - impl IdleExecutor - where - T: Future, - { - #[inline(always)] - pub fn new(idle: T) -> Self { - Self { idle } - } - - #[inline(always)] - pub fn run(&mut self) -> ! { - let w = unsafe { Waker::from_raw(noop_raw_waker()) }; - let mut ctxt = Context::from_waker(&w); - loop { - match unsafe { Pin::new_unchecked(&mut self.idle) }.poll(&mut ctxt) { - Poll::Pending => { - // All ok! - } - Poll::Ready(_) => { - // The idle executor will never return - unreachable!() - } - } - } - } - } -} pub mod executor { use core::{ @@ -143,10 +84,6 @@ pub mod executor { } } -pub type SCFQ = Queue; -pub type SCRQ = Queue<(T, u8), N>; -pub type ASYNCRQ = Queue; - /// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23). /// It needs to be large enough to cover all the relevant interrupts in use. /// For M0/M0+ there are only 32 interrupts so we only need one u32 value. @@ -290,13 +227,6 @@ where { } -#[inline(always)] -pub fn assert_monotonic() -where - T: monotonic::Monotonic, -{ -} - /// Lock implementation using BASEPRI and global Critical Section (CS) /// /// # Safety -- cgit v1.2.3 From 53f3d397e76383deabbe9579a3522174c422a958 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Wed, 4 Jan 2023 21:36:43 +0100 Subject: More removal --- src/export.rs | 5 - src/lib.rs | 4 - src/sll.rs | 421 ---------------------------------------------------------- src/tq.rs | 328 --------------------------------------------- 4 files changed, 758 deletions(-) delete mode 100644 src/sll.rs delete mode 100644 src/tq.rs (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 82320fb..2cc031e 100644 --- a/src/export.rs +++ b/src/export.rs @@ -1,8 +1,3 @@ -#![allow(clippy::inline_always)] -pub use crate::{ - sll::{IntrusiveSortedLinkedList, Node as IntrusiveNode}, - tq::{TaskNotReady, TimerQueue, WakerNotReady}, -}; pub use bare_metal::CriticalSection; use core::{ cell::Cell, diff --git a/src/lib.rs b/src/lib.rs index da556a5..e8b8140 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,10 +51,6 @@ pub mod mutex { #[doc(hidden)] pub mod export; -#[doc(hidden)] -pub mod sll; -#[doc(hidden)] -mod tq; /// Sets the given `interrupt` as pending /// diff --git a/src/sll.rs b/src/sll.rs deleted file mode 100644 index 43b53c1..0000000 --- a/src/sll.rs +++ /dev/null @@ -1,421 +0,0 @@ -//! An intrusive sorted priority linked list, designed for use in `Future`s in RTIC. 
-use core::cmp::Ordering; -use core::fmt; -use core::marker::PhantomData; -use core::ops::{Deref, DerefMut}; -use core::ptr::NonNull; - -/// Marker for Min sorted [`IntrusiveSortedLinkedList`]. -pub struct Min; - -/// Marker for Max sorted [`IntrusiveSortedLinkedList`]. -pub struct Max; - -/// The linked list kind: min-list or max-list -pub trait Kind: private::Sealed { - #[doc(hidden)] - fn ordering() -> Ordering; -} - -impl Kind for Min { - fn ordering() -> Ordering { - Ordering::Less - } -} - -impl Kind for Max { - fn ordering() -> Ordering { - Ordering::Greater - } -} - -/// Sealed traits -mod private { - pub trait Sealed {} -} - -impl private::Sealed for Max {} -impl private::Sealed for Min {} - -/// A node in the [`IntrusiveSortedLinkedList`]. -pub struct Node { - pub val: T, - next: Option>>, -} - -impl Node { - pub fn new(val: T) -> Self { - Self { val, next: None } - } -} - -/// The linked list. -pub struct IntrusiveSortedLinkedList<'a, T, K> { - head: Option>>, - _kind: PhantomData, - _lt: PhantomData<&'a ()>, -} - -impl<'a, T, K> fmt::Debug for IntrusiveSortedLinkedList<'a, T, K> -where - T: Ord + core::fmt::Debug, - K: Kind, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut l = f.debug_list(); - let mut current = self.head; - - while let Some(head) = current { - let head = unsafe { head.as_ref() }; - current = head.next; - - l.entry(&head.val); - } - - l.finish() - } -} - -impl<'a, T, K> IntrusiveSortedLinkedList<'a, T, K> -where - T: Ord, - K: Kind, -{ - pub const fn new() -> Self { - Self { - head: None, - _kind: PhantomData, - _lt: PhantomData, - } - } - - // Push to the list. - pub fn push(&mut self, new: &'a mut Node) { - unsafe { - if let Some(head) = self.head { - if head.as_ref().val.cmp(&new.val) != K::ordering() { - // This is newer than head, replace head - new.next = self.head; - self.head = Some(NonNull::new_unchecked(new)); - } else { - // It's not head, search the list for the correct placement - let mut current = head; - - while let Some(next) = current.as_ref().next { - if next.as_ref().val.cmp(&new.val) != K::ordering() { - break; - } - - current = next; - } - - new.next = current.as_ref().next; - current.as_mut().next = Some(NonNull::new_unchecked(new)); - } - } else { - // List is empty, place at head - self.head = Some(NonNull::new_unchecked(new)) - } - } - } - - /// Get an iterator over the sorted list. - pub fn iter(&self) -> Iter<'_, T, K> { - Iter { - _list: self, - index: self.head, - } - } - - /// Find an element in the list that can be changed and resorted. - pub fn find_mut(&mut self, mut f: F) -> Option> - where - F: FnMut(&T) -> bool, - { - let head = self.head?; - - // Special-case, first element - if f(&unsafe { head.as_ref() }.val) { - return Some(FindMut { - is_head: true, - prev_index: None, - index: self.head, - list: self, - maybe_changed: false, - }); - } - - let mut current = head; - - while let Some(next) = unsafe { current.as_ref() }.next { - if f(&unsafe { next.as_ref() }.val) { - return Some(FindMut { - is_head: false, - prev_index: Some(current), - index: Some(next), - list: self, - maybe_changed: false, - }); - } - - current = next; - } - - None - } - - /// Peek at the first element. - pub fn peek(&self) -> Option<&T> { - self.head.map(|head| unsafe { &head.as_ref().val }) - } - - /// Pops the first element in the list. - /// - /// Complexity is worst-case `O(1)`. 
- pub fn pop(&mut self) -> Option<&'a Node> { - if let Some(head) = self.head { - let v = unsafe { head.as_ref() }; - self.head = v.next; - Some(v) - } else { - None - } - } - - /// Checks if the linked list is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.head.is_none() - } -} - -/// Iterator for the linked list. -pub struct Iter<'a, T, K> -where - T: Ord, - K: Kind, -{ - _list: &'a IntrusiveSortedLinkedList<'a, T, K>, - index: Option>>, -} - -impl<'a, T, K> Iterator for Iter<'a, T, K> -where - T: Ord, - K: Kind, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - let index = self.index?; - - let node = unsafe { index.as_ref() }; - self.index = node.next; - - Some(&node.val) - } -} - -/// Comes from [`IntrusiveSortedLinkedList::find_mut`]. -pub struct FindMut<'a, 'b, T, K> -where - T: Ord + 'b, - K: Kind, -{ - list: &'a mut IntrusiveSortedLinkedList<'b, T, K>, - is_head: bool, - prev_index: Option>>, - index: Option>>, - maybe_changed: bool, -} - -impl<'a, 'b, T, K> FindMut<'a, 'b, T, K> -where - T: Ord, - K: Kind, -{ - unsafe fn pop_internal(&mut self) -> &'b mut Node { - if self.is_head { - // If it is the head element, we can do a normal pop - let mut head = self.list.head.unwrap_unchecked(); - let v = head.as_mut(); - self.list.head = v.next; - v - } else { - // Somewhere in the list - let mut prev = self.prev_index.unwrap_unchecked(); - let mut curr = self.index.unwrap_unchecked(); - - // Re-point the previous index - prev.as_mut().next = curr.as_ref().next; - - curr.as_mut() - } - } - - /// This will pop the element from the list. - /// - /// Complexity is worst-case `O(1)`. - #[inline] - pub fn pop(mut self) -> &'b mut Node { - unsafe { self.pop_internal() } - } - - /// This will resort the element into the correct position in the list if needed. The resorting - /// will only happen if the element has been accessed mutably. - /// - /// Same as calling `drop`. - /// - /// Complexity is worst-case `O(N)`. 
- #[inline] - pub fn finish(self) { - drop(self) - } -} - -impl<'b, T, K> Drop for FindMut<'_, 'b, T, K> -where - T: Ord + 'b, - K: Kind, -{ - fn drop(&mut self) { - // Only resort the list if the element has changed - if self.maybe_changed { - unsafe { - let val = self.pop_internal(); - self.list.push(val); - } - } - } -} - -impl Deref for FindMut<'_, '_, T, K> -where - T: Ord, - K: Kind, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { - unsafe { &self.index.unwrap_unchecked().as_ref().val } - } -} - -impl DerefMut for FindMut<'_, '_, T, K> -where - T: Ord, - K: Kind, -{ - fn deref_mut(&mut self) -> &mut Self::Target { - self.maybe_changed = true; - unsafe { &mut self.index.unwrap_unchecked().as_mut().val } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn const_new() { - static mut _V1: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - } - - #[test] - fn test_peek() { - let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - let mut a = Node { val: 1, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &1); - - let mut a = Node { val: 2, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &2); - - let mut a = Node { val: 3, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &3); - - let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - let mut a = Node { val: 2, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &2); - - let mut a = Node { val: 1, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &1); - - let mut a = Node { val: 3, next: None }; - ll.push(&mut a); - assert_eq!(ll.peek().unwrap(), &1); - } - - #[test] - fn test_empty() { - let ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - assert!(ll.is_empty()) - } - - #[test] - fn test_updating() { - let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - let mut a = Node { val: 1, next: None }; - ll.push(&mut a); - - let mut a = Node { val: 2, next: None }; - ll.push(&mut a); - - let mut a = Node { val: 3, next: None }; - ll.push(&mut a); - - let mut find = ll.find_mut(|v| *v == 2).unwrap(); - - *find += 1000; - find.finish(); - - assert_eq!(ll.peek().unwrap(), &1002); - - let mut find = ll.find_mut(|v| *v == 3).unwrap(); - - *find += 1000; - find.finish(); - - assert_eq!(ll.peek().unwrap(), &1003); - - // Remove largest element - ll.find_mut(|v| *v == 1003).unwrap().pop(); - - assert_eq!(ll.peek().unwrap(), &1002); - } - - #[test] - fn test_updating_1() { - let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - let mut a = Node { val: 1, next: None }; - ll.push(&mut a); - - let v = ll.pop().unwrap(); - - assert_eq!(v.val, 1); - } - - #[test] - fn test_updating_2() { - let mut ll: IntrusiveSortedLinkedList = IntrusiveSortedLinkedList::new(); - - let mut a = Node { val: 1, next: None }; - ll.push(&mut a); - - let mut find = ll.find_mut(|v| *v == 1).unwrap(); - - *find += 1000; - find.finish(); - - assert_eq!(ll.peek().unwrap(), &1001); - } -} diff --git a/src/tq.rs b/src/tq.rs deleted file mode 100644 index daa91c8..0000000 --- a/src/tq.rs +++ /dev/null @@ -1,328 +0,0 @@ -use crate::{ - sll::{IntrusiveSortedLinkedList, Min as IsslMin, Node as IntrusiveNode}, - Monotonic, -}; -use core::cmp::Ordering; -use core::task::Waker; -use heapless::sorted_linked_list::{LinkedIndexU16, Min as SllMin, SortedLinkedList}; - -pub struct TimerQueue<'a, Mono, Task, const N_TASK: usize> -where - Mono: Monotonic, - Task: 
Copy, -{ - pub task_queue: SortedLinkedList, LinkedIndexU16, SllMin, N_TASK>, - pub waker_queue: IntrusiveSortedLinkedList<'a, WakerNotReady, IsslMin>, -} - -impl<'a, Mono, Task, const N_TASK: usize> TimerQueue<'a, Mono, Task, N_TASK> -where - Mono: Monotonic + 'a, - Task: Copy, -{ - fn check_if_enable( - &self, - instant: Mono::Instant, - enable_interrupt: F1, - pend_handler: F2, - mono: Option<&mut Mono>, - ) where - F1: FnOnce(), - F2: FnOnce(), - { - // Check if the top contains a non-empty element and if that element is - // greater than nr - let if_task_heap_max_greater_than_nr = self - .task_queue - .peek() - .map_or(true, |head| instant < head.instant); - let if_waker_heap_max_greater_than_nr = self - .waker_queue - .peek() - .map_or(true, |head| instant < head.instant); - - if if_task_heap_max_greater_than_nr || if_waker_heap_max_greater_than_nr { - if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE && self.is_empty() { - if let Some(mono) = mono { - mono.enable_timer(); - } - enable_interrupt(); - } - - pend_handler(); - } - } - - /// Enqueue a task without checking if it is full - #[inline] - pub unsafe fn enqueue_task_unchecked( - &mut self, - nr: TaskNotReady, - enable_interrupt: F1, - pend_handler: F2, - mono: Option<&mut Mono>, - ) where - F1: FnOnce(), - F2: FnOnce(), - { - self.check_if_enable(nr.instant, enable_interrupt, pend_handler, mono); - self.task_queue.push_unchecked(nr); - } - - /// Enqueue a waker - #[inline] - pub fn enqueue_waker( - &mut self, - nr: &'a mut IntrusiveNode>, - enable_interrupt: F1, - pend_handler: F2, - mono: Option<&mut Mono>, - ) where - F1: FnOnce(), - F2: FnOnce(), - { - self.check_if_enable(nr.val.instant, enable_interrupt, pend_handler, mono); - self.waker_queue.push(nr); - } - - /// Check if all the timer queue is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.task_queue.is_empty() && self.waker_queue.is_empty() - } - - /// Cancel the marker value for a task - pub fn cancel_task_marker(&mut self, marker: u32) -> Option<(Task, u8)> { - if let Some(val) = self.task_queue.find_mut(|nr| nr.marker == marker) { - let nr = val.pop(); - - Some((nr.task, nr.index)) - } else { - None - } - } - - /// Cancel the marker value for a waker - pub fn cancel_waker_marker(&mut self, marker: u32) { - if let Some(val) = self.waker_queue.find_mut(|nr| nr.marker == marker) { - let _ = val.pop(); - } - } - - /// Update the instant at an marker value for a task to a new instant - #[allow(clippy::result_unit_err)] - pub fn update_task_marker( - &mut self, - marker: u32, - new_marker: u32, - instant: Mono::Instant, - pend_handler: F, - ) -> Result<(), ()> { - if let Some(mut val) = self.task_queue.find_mut(|nr| nr.marker == marker) { - val.instant = instant; - val.marker = new_marker; - - // On update pend the handler to reconfigure the next compare match - pend_handler(); - - Ok(()) - } else { - Err(()) - } - } - - fn dequeue_task_queue( - &mut self, - instant: Mono::Instant, - mono: &mut Mono, - ) -> Option<(Task, u8)> { - if instant <= mono.now() { - // task became ready - let nr = unsafe { self.task_queue.pop_unchecked() }; - Some((nr.task, nr.index)) - } else { - // Set compare - mono.set_compare(instant); - - // Double check that the instant we set is really in the future, else - // dequeue. If the monotonic is fast enough it can happen that from the - // read of now to the set of the compare, the time can overflow. This is to - // guard against this. 
- if instant <= mono.now() { - let nr = unsafe { self.task_queue.pop_unchecked() }; - Some((nr.task, nr.index)) - } else { - None - } - } - } - - fn dequeue_waker_queue(&mut self, instant: Mono::Instant, mono: &mut Mono) -> bool { - let mut did_wake = false; - - if instant <= mono.now() { - // Task became ready, wake the waker - if let Some(v) = self.waker_queue.pop() { - v.val.waker.wake_by_ref(); - - did_wake = true; - } - } else { - // Set compare - mono.set_compare(instant); - - // Double check that the instant we set is really in the future, else - // dequeue. If the monotonic is fast enough it can happen that from the - // read of now to the set of the compare, the time can overflow. This is to - // guard against this. - if instant <= mono.now() { - if let Some(v) = self.waker_queue.pop() { - v.val.waker.wake_by_ref(); - - did_wake = true; - } - } - } - - did_wake - } - - /// Dequeue a task from the ``TimerQueue`` - pub fn dequeue(&mut self, disable_interrupt: F, mono: &mut Mono) -> Option<(Task, u8)> - where - F: FnOnce(), - { - mono.clear_compare_flag(); - - loop { - let tq = self.task_queue.peek().map(|p| p.instant); - let wq = self.waker_queue.peek().map(|p| p.instant); - - let dequeue_task; - let instant; - - match (tq, wq) { - (Some(tq_instant), Some(wq_instant)) => { - if tq_instant <= wq_instant { - dequeue_task = true; - instant = tq_instant; - } else { - dequeue_task = false; - instant = wq_instant; - } - } - (Some(tq_instant), None) => { - dequeue_task = true; - instant = tq_instant; - } - (None, Some(wq_instant)) => { - dequeue_task = false; - instant = wq_instant; - } - (None, None) => { - // The queue is empty, disable the interrupt. - if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE { - disable_interrupt(); - mono.disable_timer(); - } - - return None; - } - } - - if dequeue_task { - return self.dequeue_task_queue(instant, mono); - } else if !self.dequeue_waker_queue(instant, mono) { - return None; - } else { - // Run the dequeue again - } - } - } -} - -pub struct TaskNotReady -where - Task: Copy, - Mono: Monotonic, -{ - pub task: Task, - pub index: u8, - pub instant: Mono::Instant, - pub marker: u32, -} - -impl Eq for TaskNotReady -where - Task: Copy, - Mono: Monotonic, -{ -} - -impl Ord for TaskNotReady -where - Task: Copy, - Mono: Monotonic, -{ - fn cmp(&self, other: &Self) -> Ordering { - self.instant.cmp(&other.instant) - } -} - -impl PartialEq for TaskNotReady -where - Task: Copy, - Mono: Monotonic, -{ - fn eq(&self, other: &Self) -> bool { - self.instant == other.instant - } -} - -impl PartialOrd for TaskNotReady -where - Task: Copy, - Mono: Monotonic, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -pub struct WakerNotReady -where - Mono: Monotonic, -{ - pub waker: Waker, - pub instant: Mono::Instant, - pub marker: u32, -} - -impl Eq for WakerNotReady where Mono: Monotonic {} - -impl Ord for WakerNotReady -where - Mono: Monotonic, -{ - fn cmp(&self, other: &Self) -> Ordering { - self.instant.cmp(&other.instant) - } -} - -impl PartialEq for WakerNotReady -where - Mono: Monotonic, -{ - fn eq(&self, other: &Self) -> bool { - self.instant == other.instant - } -} - -impl PartialOrd for WakerNotReady -where - Mono: Monotonic, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -- cgit v1.2.3 From 714020a624ca93c42d5da7ebe612e7fc668e1471 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sat, 7 Jan 2023 11:24:13 +0100 Subject: Removed Priority, simplified lifetime handling --- src/export.rs | 103 
++++++++++++++-------------------------------------------- 1 file changed, 25 insertions(+), 78 deletions(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 2cc031e..49ebd87 100644 --- a/src/export.rs +++ b/src/export.rs @@ -163,38 +163,6 @@ impl Barrier { } } -// Newtype over `Cell` that forbids mutation through a shared reference -pub struct Priority { - inner: Cell, -} - -impl Priority { - /// Create a new Priority - /// - /// # Safety - /// - /// Will overwrite the current Priority - #[inline(always)] - pub const unsafe fn new(value: u8) -> Self { - Priority { - inner: Cell::new(value), - } - } - - /// Change the current priority to `value` - // These two methods are used by `lock` (see below) but can't be used from the RTIC application - #[inline(always)] - fn set(&self, value: u8) { - self.inner.set(value); - } - - /// Get the current priority - #[inline(always)] - fn get(&self) -> u8 { - self.inner.get() - } -} - /// Const helper to check architecture pub const fn have_basepri() -> bool { #[cfg(have_basepri)] @@ -260,30 +228,20 @@ where #[inline(always)] pub unsafe fn lock( ptr: *mut T, - priority: &Priority, ceiling: u8, nvic_prio_bits: u8, _mask: &[Mask; 3], f: impl FnOnce(&mut T) -> R, ) -> R { - let current = priority.get(); - - if current < ceiling { - if ceiling == (1 << nvic_prio_bits) { - priority.set(u8::max_value()); - let r = interrupt::free(|_| f(&mut *ptr)); - priority.set(current); - r - } else { - priority.set(ceiling); - basepri::write(logical2hw(ceiling, nvic_prio_bits)); - let r = f(&mut *ptr); - basepri::write(logical2hw(current, nvic_prio_bits)); - priority.set(current); - r - } + if ceiling == (1 << nvic_prio_bits) { + let r = interrupt::free(|_| f(&mut *ptr)); + r } else { - f(&mut *ptr) + let current = basepri::read(); + basepri::write(logical2hw(ceiling, nvic_prio_bits)); + let r = f(&mut *ptr); + basepri::write(logical2hw(current, nvic_prio_bits)); + r } } @@ -335,40 +293,29 @@ pub unsafe fn lock( #[inline(always)] pub unsafe fn lock( ptr: *mut T, - priority: &Priority, ceiling: u8, _nvic_prio_bits: u8, masks: &[Mask; 3], f: impl FnOnce(&mut T) -> R, ) -> R { - let current = priority.get(); - if current < ceiling { - if ceiling >= 4 { - // safe to manipulate outside critical section - priority.set(ceiling); - // execute closure under protection of raised system ceiling - let r = interrupt::free(|_| f(&mut *ptr)); - // safe to manipulate outside critical section - priority.set(current); - r - } else { - // safe to manipulate outside critical section - priority.set(ceiling); - let mask = compute_mask(current, ceiling, masks); - clear_enable_mask(mask); - - // execute closure under protection of raised system ceiling - let r = f(&mut *ptr); - - set_enable_mask(mask); - - // safe to manipulate outside critical section - priority.set(current); - r - } + if ceiling >= 4 { + // safe to manipulate outside critical section + // execute closure under protection of raised system ceiling + let r = interrupt::free(|_| f(&mut *ptr)); + // safe to manipulate outside critical section + r } else { - // execute closure without raising system ceiling - f(&mut *ptr) + // safe to manipulate outside critical section + let mask = compute_mask(0, ceiling, masks); + clear_enable_mask(mask); + + // execute closure under protection of raised system ceiling + let r = f(&mut *ptr); + + set_enable_mask(mask); + + // safe to manipulate outside critical section + r } } -- cgit v1.2.3 From 6dc2d29cd994a27fa59e23f9fb0bece677c83ffa Mon Sep 17 00:00:00 2001 From: Per 
Lindgren Date: Sat, 7 Jan 2023 14:07:50 +0100 Subject: export Cell removed, expmples updated --- src/export.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 49ebd87..d6b2fc0 100644 --- a/src/export.rs +++ b/src/export.rs @@ -1,8 +1,5 @@ pub use bare_metal::CriticalSection; -use core::{ - cell::Cell, - sync::atomic::{AtomicBool, Ordering}, -}; +use core::sync::atomic::{AtomicBool, Ordering}; pub use cortex_m::{ asm::nop, asm::wfi, -- cgit v1.2.3 From 5606ba3cf38c80be5d3e9c88ad4da9982b114851 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sat, 7 Jan 2023 14:38:04 +0100 Subject: Fix locks, basepri writeback error --- src/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index d6b2fc0..bfd0f6d 100644 --- a/src/export.rs +++ b/src/export.rs @@ -237,7 +237,7 @@ pub unsafe fn lock( let current = basepri::read(); basepri::write(logical2hw(ceiling, nvic_prio_bits)); let r = f(&mut *ptr); - basepri::write(logical2hw(current, nvic_prio_bits)); + basepri::write(current); r } } -- cgit v1.2.3 From c40c89bb4edc22c4a60d8677c660a9ab7eb47e92 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 8 Jan 2023 21:30:53 +0100 Subject: Clippy fixes --- src/export.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index bfd0f6d..091cfb8 100644 --- a/src/export.rs +++ b/src/export.rs @@ -298,9 +298,9 @@ pub unsafe fn lock( if ceiling >= 4 { // safe to manipulate outside critical section // execute closure under protection of raised system ceiling - let r = interrupt::free(|_| f(&mut *ptr)); + // safe to manipulate outside critical section - r + interrupt::free(|_| f(&mut *ptr)) } else { // safe to manipulate outside critical section let mask = compute_mask(0, ceiling, masks); -- cgit v1.2.3 From 95e494968053a17ac05a0c1cec9d8b2c7d450296 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 8 Jan 2023 21:33:44 +0100 Subject: Start CI, disable docs building --- src/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 091cfb8..7beaf16 100644 --- a/src/export.rs +++ b/src/export.rs @@ -298,7 +298,7 @@ pub unsafe fn lock( if ceiling >= 4 { // safe to manipulate outside critical section // execute closure under protection of raised system ceiling - + // safe to manipulate outside critical section interrupt::free(|_| f(&mut *ptr)) } else { -- cgit v1.2.3 From 1eabb94f0424d7ff85786ad05615da69a379f01d Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 9 Jan 2023 09:48:39 +0100 Subject: New executor design --- src/export.rs | 68 +-------------------------------- src/export/executor.rs | 100 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 67 deletions(-) create mode 100644 src/export/executor.rs (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 7beaf16..6017dcf 100644 --- a/src/export.rs +++ b/src/export.rs @@ -8,73 +8,7 @@ pub use cortex_m::{ Peripherals, }; -pub mod executor { - use core::{ - future::Future, - mem, - pin::Pin, - task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, - }; - - static WAKER_VTABLE: RawWakerVTable = - RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop); - - unsafe fn waker_clone(p: *const ()) -> RawWaker { - RawWaker::new(p, &WAKER_VTABLE) - } - - unsafe fn waker_wake(p: *const ()) { - // The only thing we need from a waker 
is the function to call to pend the async - // dispatcher. - let f: fn() = mem::transmute(p); - f(); - } - - unsafe fn waker_drop(_: *const ()) { - // nop - } - - //============ - // AsyncTaskExecutor - - pub struct AsyncTaskExecutor { - task: Option, - } - - impl AsyncTaskExecutor { - pub const fn new() -> Self { - Self { task: None } - } - - pub fn is_running(&self) -> bool { - self.task.is_some() - } - - pub fn spawn(&mut self, future: F) { - self.task = Some(future); - } - - pub fn poll(&mut self, wake: fn()) -> bool { - if let Some(future) = &mut self.task { - unsafe { - let waker = Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)); - let mut cx = Context::from_waker(&waker); - let future = Pin::new_unchecked(future); - - match future.poll(&mut cx) { - Poll::Ready(_) => { - self.task = None; - true // Only true if we finished now - } - Poll::Pending => false, - } - } - } else { - false - } - } - } -} +pub mod executor; /// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23). /// It needs to be large enough to cover all the relevant interrupts in use. diff --git a/src/export/executor.rs b/src/export/executor.rs new file mode 100644 index 0000000..874ee19 --- /dev/null +++ b/src/export/executor.rs @@ -0,0 +1,100 @@ +use core::{ + cell::UnsafeCell, + future::Future, + mem::{self, MaybeUninit}, + pin::Pin, + sync::atomic::{AtomicBool, Ordering}, + task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, +}; + +static WAKER_VTABLE: RawWakerVTable = + RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop); + +unsafe fn waker_clone(p: *const ()) -> RawWaker { + RawWaker::new(p, &WAKER_VTABLE) +} + +unsafe fn waker_wake(p: *const ()) { + // The only thing we need from a waker is the function to call to pend the async + // dispatcher. + let f: fn() = mem::transmute(p); + f(); +} + +unsafe fn waker_drop(_: *const ()) { + // nop +} + +//============ +// AsyncTaskExecutor + +/// Executor for an async task. +pub struct AsyncTaskExecutor { + // `task` is proteced by the `running` flag. + task: UnsafeCell>, + running: AtomicBool, + pending: AtomicBool, +} + +unsafe impl Sync for AsyncTaskExecutor {} + +impl AsyncTaskExecutor { + /// Create a new executor. + pub const fn new() -> Self { + Self { + task: UnsafeCell::new(MaybeUninit::uninit()), + running: AtomicBool::new(false), + pending: AtomicBool::new(false), + } + } + + /// Check if there is an active task in the executor. + pub fn is_running(&self) -> bool { + self.running.load(Ordering::Relaxed) + } + + /// Checks if a waker has pended the executor. + pub fn is_pending(&self) -> bool { + self.pending.load(Ordering::Relaxed) + } + + // Used by wakers to indicate that the executor needs to run. + pub fn set_pending(&self) { + self.pending.store(true, Ordering::Release); + } + + /// Try to reserve the executor for a future. + /// Used in conjunction with `spawn_unchecked` to reserve the executor before spawning. + /// + /// This could have been joined with `spawn_unchecked` for a complete safe API, however the + /// codegen needs to see if the reserve fails so it can give back input parameters. If spawning + /// was done within the same call the input parameters would be lost and could not be returned. + pub fn try_reserve(&self) -> bool { + self.running + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + } + + /// Spawn a future, only valid to do after `try_reserve` succeeds. 
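+    ///
+    /// # Safety
+    ///
+    /// `try_reserve` must have returned `true`, and no other spawn may have
+    /// occurred since; otherwise this overwrites a future that the executor
+    /// may still be polling.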
+ pub unsafe fn spawn_unchecked(&self, future: F) { + debug_assert!(self.running.load(Ordering::Relaxed)); + + self.task.get().write(MaybeUninit::new(future)); + } + + /// Poll the future in the executor. + pub fn poll(&self, wake: fn()) { + if self.is_running() { + let waker = unsafe { Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)) }; + let mut cx = Context::from_waker(&waker); + let future = unsafe { Pin::new_unchecked(&mut *(self.task.get() as *mut F)) }; + + match future.poll(&mut cx) { + Poll::Ready(_) => { + self.running.store(false, Ordering::Release); + } + Poll::Pending => {} + } + } + } +} -- cgit v1.2.3 From cd790a94286cdc307d399b7f7a43e305e90de5bf Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 9 Jan 2023 21:02:53 +0100 Subject: More work on new spawn/executor --- src/export.rs | 25 ++----------------------- src/export/executor.rs | 11 +++++++---- 2 files changed, 9 insertions(+), 27 deletions(-) (limited to 'src') diff --git a/src/export.rs b/src/export.rs index 6017dcf..cdca972 100644 --- a/src/export.rs +++ b/src/export.rs @@ -1,5 +1,4 @@ pub use bare_metal::CriticalSection; -use core::sync::atomic::{AtomicBool, Ordering}; pub use cortex_m::{ asm::nop, asm::wfi, @@ -7,6 +6,8 @@ pub use cortex_m::{ peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST}, Peripherals, }; +//pub use portable_atomic as atomic; +pub use atomic_polyfill as atomic; pub mod executor; @@ -72,28 +73,6 @@ where f(); } -pub struct Barrier { - inner: AtomicBool, -} - -impl Barrier { - pub const fn new() -> Self { - Barrier { - inner: AtomicBool::new(false), - } - } - - pub fn release(&self) { - self.inner.store(true, Ordering::Release); - } - - pub fn wait(&self) { - while !self.inner.load(Ordering::Acquire) { - core::hint::spin_loop() - } - } -} - /// Const helper to check architecture pub const fn have_basepri() -> bool { #[cfg(have_basepri)] diff --git a/src/export/executor.rs b/src/export/executor.rs index 874ee19..2f88eff 100644 --- a/src/export/executor.rs +++ b/src/export/executor.rs @@ -1,9 +1,9 @@ +use super::atomic::{AtomicBool, Ordering}; use core::{ cell::UnsafeCell, future::Future, mem::{self, MaybeUninit}, pin::Pin, - sync::atomic::{AtomicBool, Ordering}, task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, }; @@ -53,9 +53,11 @@ impl<F: Future> AsyncTaskExecutor<F> { self.running.load(Ordering::Relaxed) } - /// Checks if a waker has pended the executor. - pub fn is_pending(&self) -> bool { - self.pending.load(Ordering::Relaxed) + /// Checks if a waker has pended the executor and simultaneously clears the flag. + pub fn check_and_clear_pending(&self) -> bool { + self.pending + .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed) + .is_ok() } // Used by wakers to indicate that the executor needs to run. @@ -80,6 +82,7 @@ impl<F: Future> AsyncTaskExecutor<F> { debug_assert!(self.running.load(Ordering::Relaxed)); self.task.get().write(MaybeUninit::new(future)); + self.set_pending(); } /// Poll the future in the executor.
-- cgit v1.2.3 From 5688a5d332cdaffaca64ade5b138a3676ac7cd32 Mon Sep 17 00:00:00 2001 From: Per Lindgren Date: Thu, 12 Jan 2023 08:50:12 +0100 Subject: executor update for less unsafe and more clear --- src/export/executor.rs | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) (limited to 'src') diff --git a/src/export/executor.rs b/src/export/executor.rs index 2f88eff..e64cc43 100644 --- a/src/export/executor.rs +++ b/src/export/executor.rs @@ -30,7 +30,7 @@ unsafe fn waker_drop(_: *const ()) { /// Executor for an async task. pub struct AsyncTaskExecutor<F: Future> { - // `task` is proteced by the `running` flag. + // `task` is protected by the `running` flag. task: UnsafeCell<MaybeUninit<F>>, running: AtomicBool, pending: AtomicBool, @@ -40,6 +40,7 @@ unsafe impl<F: Future> Sync for AsyncTaskExecutor<F> {} impl<F: Future> AsyncTaskExecutor<F> { /// Create a new executor. + #[inline(always)] pub const fn new() -> Self { Self { task: UnsafeCell::new(MaybeUninit::uninit()), @@ -49,45 +50,51 @@ impl<F: Future> AsyncTaskExecutor<F> { } /// Check if there is an active task in the executor. + #[inline(always)] pub fn is_running(&self) -> bool { self.running.load(Ordering::Relaxed) } /// Checks if a waker has pended the executor and simultaneously clears the flag. - pub fn check_and_clear_pending(&self) -> bool { + #[inline(always)] + fn check_and_clear_pending(&self) -> bool { + // Ordering::Acquire to enforce that update of task is visible to poll self.pending - .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed) + .compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) .is_ok() } // Used by wakers to indicate that the executor needs to run. + #[inline(always)] pub fn set_pending(&self) { self.pending.store(true, Ordering::Release); } - /// Try to reserve the executor for a future. - /// Used in conjunction with `spawn_unchecked` to reserve the executor before spawning. - /// - /// This could have been joined with `spawn_unchecked` for a complete safe API, however the - /// codegen needs to see if the reserve fails so it can give back input parameters. If spawning - /// was done within the same call the input parameters would be lost and could not be returned. - pub fn try_reserve(&self) -> bool { - self.running + /// Spawn a future + #[inline(always)] + pub fn spawn(&self, future: impl Fn() -> F) -> bool { + // Try to reserve the executor for a future. + if self + .running .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .is_ok() - } - - /// Spawn a future, only valid to do after `try_reserve` succeeds. - pub unsafe fn spawn_unchecked(&self, future: F) { - debug_assert!(self.running.load(Ordering::Relaxed)); + { + // This unsafe is protected by `running` being false and the atomic setting it to true. + unsafe { + self.task.get().write(MaybeUninit::new(future())); + } + self.set_pending(); - self.task.get().write(MaybeUninit::new(future)); - self.set_pending(); + true + } else { + false + } } /// Poll the future in the executor.
+ #[inline(always)] pub fn poll(&self, wake: fn()) { - if self.is_running() { + if self.is_running() && self.check_and_clear_pending() { let waker = unsafe { Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)) }; let mut cx = Context::from_waker(&waker); let future = unsafe { Pin::new_unchecked(&mut *(self.task.get() as *mut F)) }; -- cgit v1.2.3 From 306aa47170fd59369b7a184924e287dc3706d64d Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 23 Jan 2023 20:05:47 +0100 Subject: Add rtic-timer (timerqueue + monotonic) and rtic-monotonics (systick-monotonic) --- src/export.rs | 324 ------------------------------------------------- src/export/executor.rs | 110 ----------------- src/lib.rs | 121 ------------------ 3 files changed, 555 deletions(-) delete mode 100644 src/export.rs delete mode 100644 src/export/executor.rs delete mode 100644 src/lib.rs (limited to 'src') diff --git a/src/export.rs b/src/export.rs deleted file mode 100644 index cdca972..0000000 --- a/src/export.rs +++ /dev/null @@ -1,324 +0,0 @@ -pub use bare_metal::CriticalSection; -pub use cortex_m::{ - asm::nop, - asm::wfi, - interrupt, - peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST}, - Peripherals, -}; -//pub use portable_atomic as atomic; -pub use atomic_polyfill as atomic; - -pub mod executor; - -/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23). -/// It needs to be large enough to cover all the relevant interrupts in use. -/// For M0/M0+ there are only 32 interrupts so we only need one u32 value. -/// For M23 there can be as many as 480 interrupts. -/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in -/// use at compile time and allocate enough u32 chunks to cover them. -#[derive(Copy, Clone)] -pub struct Mask<const M: usize>([u32; M]); - -impl<const M: usize> core::ops::BitOrAssign for Mask<M> { - fn bitor_assign(&mut self, rhs: Self) { - for i in 0..M { - self.0[i] |= rhs.0[i]; - } - } -} - -#[cfg(not(have_basepri))] -impl<const M: usize> Mask<M> { - /// Set a bit inside a Mask. - const fn set_bit(mut self, bit: u32) -> Self { - let block = bit / 32; - - if block as usize >= M { - panic!("Generating masks for thumbv6/thumbv8m.base failed!
Are you compiling for thumbv6 on a thumbv7 MCU or using an unsupported thumbv8m.base MCU?"); - } - - let offset = bit - (block * 32); - self.0[block as usize] |= 1 << offset; - self - } -} - -#[cfg(have_basepri)] -use cortex_m::register::basepri; - -#[cfg(have_basepri)] -#[inline(always)] -pub fn run<F>(priority: u8, f: F) -where - F: FnOnce(), -{ - if priority == 1 { - // If the priority of this interrupt is `1` then BASEPRI can only be `0` - f(); - unsafe { basepri::write(0) } - } else { - let initial = basepri::read(); - f(); - unsafe { basepri::write(initial) } - } -} - -#[cfg(not(have_basepri))] -#[inline(always)] -pub fn run<F>(_priority: u8, f: F) -where - F: FnOnce(), -{ - f(); -} - -/// Const helper to check architecture -pub const fn have_basepri() -> bool { - #[cfg(have_basepri)] - { - true - } - - #[cfg(not(have_basepri))] - { - false - } -} - -#[inline(always)] -pub fn assert_send<T>() -where - T: Send, -{ -} - -#[inline(always)] -pub fn assert_sync<T>() -where - T: Sync, -{ -} - -/// Lock implementation using BASEPRI and global Critical Section (CS) -/// -/// # Safety -/// -/// The system ceiling is raised from current to ceiling -/// by either -/// - raising the BASEPRI to the ceiling value, or -/// - disable all interrupts in case we want to -/// mask interrupts with maximum priority -/// -/// Dereferencing a raw pointer inside CS -/// -/// The priority.set/priority.get can safely be outside the CS -/// as being a context local cell (not affected by preemptions). -/// It is merely used in order to omit masking in case -/// current priority >= ceiling. -/// -/// Lock Efficiency: -/// Experiments validate (sub)-zero cost for CS implementation -/// (Sub)-zero as: -/// - Either zero OH (lock optimized out), or -/// - Amounting to an optimal assembly implementation -/// - The BASEPRI value is folded to a constant at compile time -/// - CS entry, single assembly instruction to write BASEPRI -/// - CS exit, single assembly instruction to write BASEPRI -/// - priority.set/get optimized out (their effect not) -/// - On par or better than any handwritten implementation of SRP -/// -/// Limitations: -/// The current implementation reads/writes BASEPRI once -/// even in some edge cases where this may be omitted. -/// Total OH per task is max 2 clock cycles, negligible in practice -/// but can in theory be fixed. -/// -#[cfg(have_basepri)] -#[inline(always)] -pub unsafe fn lock<T, R, const M: usize>( - ptr: *mut T, - ceiling: u8, - nvic_prio_bits: u8, - _mask: &[Mask<M>; 3], - f: impl FnOnce(&mut T) -> R, -) -> R { - if ceiling == (1 << nvic_prio_bits) { - let r = interrupt::free(|_| f(&mut *ptr)); - r - } else { - let current = basepri::read(); - basepri::write(logical2hw(ceiling, nvic_prio_bits)); - let r = f(&mut *ptr); - basepri::write(current); - r - } -} - -/// Lock implementation using interrupt masking -/// -/// # Safety -/// -/// The system ceiling is raised from current to ceiling -/// by computing a 32 bit `mask` (1 bit per interrupt) -/// 1: ceiling >= priority > current -/// 0: else -/// -/// On CS entry, `clear_enable_mask(mask)` disables interrupts -/// On CS exit, `set_enable_mask(mask)` re-enables interrupts -/// -/// The priority.set/priority.get can safely be outside the CS -/// as being a context local cell (not affected by preemptions). -/// It is merely used in order to omit masking in case -/// current priority >= ceiling.
-/// -/// Dereferencing a raw pointer is done safely inside the CS -/// -/// Lock Efficiency: -/// Early experiments validate (sub)-zero cost for CS implementation -/// (Sub)-zero as: -/// - Either zero OH (lock optimized out), or -/// - Amounting to an optimal assembly implementation -/// - if ceiling == (1 << nvic_prio_bits) -/// - we execute the closure in a global critical section (interrupt free) -/// - CS entry cost, single write to core register -/// - CS exit cost, single write to core register -/// else -/// - The `mask` value is folded to a constant at compile time -/// - CS entry, single write of the 32 bit `mask` to the `icer` register -/// - CS exit, single write of the 32 bit `mask` to the `iser` register -/// - priority.set/get optimized out (their effect not) -/// - On par or better than any hand written implementation of SRP -/// -/// Limitations: -/// Current implementation does not allow for tasks with shared resources -/// to be bound to exception handlers, as these cannot be masked in HW. -/// -/// Possible solutions: -/// - Mask exceptions by global critical sections (interrupt::free) -/// - Temporarily lower exception priority -/// -/// These possible solutions are set goals for future work -#[cfg(not(have_basepri))] -#[inline(always)] -pub unsafe fn lock<T, R, const M: usize>( - ptr: *mut T, - ceiling: u8, - _nvic_prio_bits: u8, - masks: &[Mask<M>; 3], - f: impl FnOnce(&mut T) -> R, -) -> R { - if ceiling >= 4 { - // safe to manipulate outside critical section - // execute closure under protection of raised system ceiling - - // safe to manipulate outside critical section - interrupt::free(|_| f(&mut *ptr)) - } else { - // safe to manipulate outside critical section - let mask = compute_mask(0, ceiling, masks); - clear_enable_mask(mask); - - // execute closure under protection of raised system ceiling - let r = f(&mut *ptr); - - set_enable_mask(mask); - - // safe to manipulate outside critical section - r - } -} - -#[cfg(not(have_basepri))] -#[inline(always)] -fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> { - let mut res = Mask([0; M]); - masks[from_prio as usize..to_prio as usize] - .iter() - .for_each(|m| res |= *m); - res } - -// enables interrupts -#[cfg(not(have_basepri))] -#[inline(always)] -unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) { - for i in 0..M { - // This check should involve compile time constants and be optimized out. - if mask.0[i] != 0 { - (*NVIC::PTR).iser[i].write(mask.0[i]); - } - } -} - -// disables interrupts -#[cfg(not(have_basepri))] -#[inline(always)] -unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) { - for i in 0..M { - // This check should involve compile time constants and be optimized out. - if mask.0[i] != 0 { - (*NVIC::PTR).icer[i].write(mask.0[i]); - } - } -} - -#[inline] -#[must_use] -pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 { - ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits) -} - -#[cfg(have_basepri)] -pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> { - Mask([0; M]) -} - -#[cfg(not(have_basepri))] -pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> { - let mut mask = Mask([0; M]); - let mut i = 0; - - while i < N { - let shift = list_of_shifts[i]; - i += 1; - mask = mask.set_bit(shift); - } - - mask -} - -#[cfg(have_basepri)] -pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize { - 0 -} - -/// Compute the number of u32 chunks needed to store the Mask value. -/// On M0, M0+ this should always end up being 1. -/// On M23 we will pick a number that allows us to store the highest index used by the code.
-/// This means the amount of overhead will vary based on the actual interrupts used by the code. -#[cfg(not(have_basepri))] -pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize { - let mut max: usize = 0; - let mut i = 0; - - while i < L { - let id = ids[i] as usize; - i += 1; - - if id > max { - max = id; - } - } - (max + 32) / 32 -} - -#[cfg(have_basepri)] -pub const fn no_basepri_panic() { - // For non-v6 all is fine -} - -#[cfg(not(have_basepri))] -pub const fn no_basepri_panic() { - panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources"); -} diff --git a/src/export/executor.rs b/src/export/executor.rs deleted file mode 100644 index e64cc43..0000000 --- a/src/export/executor.rs +++ /dev/null @@ -1,110 +0,0 @@ -use super::atomic::{AtomicBool, Ordering}; -use core::{ - cell::UnsafeCell, - future::Future, - mem::{self, MaybeUninit}, - pin::Pin, - task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, -}; - -static WAKER_VTABLE: RawWakerVTable = - RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop); - -unsafe fn waker_clone(p: *const ()) -> RawWaker { - RawWaker::new(p, &WAKER_VTABLE) -} - -unsafe fn waker_wake(p: *const ()) { - // The only thing we need from a waker is the function to call to pend the async - // dispatcher. - let f: fn() = mem::transmute(p); - f(); -} - -unsafe fn waker_drop(_: *const ()) { - // nop -} - -//============ -// AsyncTaskExecutor - -/// Executor for an async task. -pub struct AsyncTaskExecutor<F: Future> { - // `task` is protected by the `running` flag. - task: UnsafeCell<MaybeUninit<F>>, - running: AtomicBool, - pending: AtomicBool, -} - -unsafe impl<F: Future> Sync for AsyncTaskExecutor<F> {} - -impl<F: Future> AsyncTaskExecutor<F> { - /// Create a new executor. - #[inline(always)] - pub const fn new() -> Self { - Self { - task: UnsafeCell::new(MaybeUninit::uninit()), - running: AtomicBool::new(false), - pending: AtomicBool::new(false), - } - } - - /// Check if there is an active task in the executor. - #[inline(always)] - pub fn is_running(&self) -> bool { - self.running.load(Ordering::Relaxed) - } - - /// Checks if a waker has pended the executor and simultaneously clears the flag. - #[inline(always)] - fn check_and_clear_pending(&self) -> bool { - // Ordering::Acquire to enforce that update of task is visible to poll - self.pending - .compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - } - - // Used by wakers to indicate that the executor needs to run. - #[inline(always)] - pub fn set_pending(&self) { - self.pending.store(true, Ordering::Release); - } - - /// Spawn a future - #[inline(always)] - pub fn spawn(&self, future: impl Fn() -> F) -> bool { - // Try to reserve the executor for a future. - if self - .running - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - // This unsafe is protected by `running` being false and the atomic setting it to true. - unsafe { - self.task.get().write(MaybeUninit::new(future())); - } - self.set_pending(); - - true - } else { - false - } - } - - /// Poll the future in the executor.
- #[inline(always)] - pub fn poll(&self, wake: fn()) { - if self.is_running() && self.check_and_clear_pending() { - let waker = unsafe { Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)) }; - let mut cx = Context::from_waker(&waker); - let future = unsafe { Pin::new_unchecked(&mut *(self.task.get() as *mut F)) }; - - match future.poll(&mut cx) { - Poll::Ready(_) => { - self.running.store(false, Ordering::Release); - } - Poll::Pending => {} - } - } - } -} diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index e8b8140..0000000 --- a/src/lib.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! Real-Time Interrupt-driven Concurrency (RTIC) framework for ARM Cortex-M microcontrollers. -//! -//! **IMPORTANT**: This crate is published as [`cortex-m-rtic`] on crates.io but the name of the -//! library is `rtic`. -//! -//! [`cortex-m-rtic`]: https://crates.io/crates/cortex-m-rtic -//! -//! The user level documentation can be found [here]. -//! -//! [here]: https://rtic.rs -//! -//! Don't forget to check the documentation of the `#[app]` attribute (listed under the reexports -//! section), which is the main component of the framework. -//! -//! # Minimum Supported Rust Version (MSRV) -//! -//! This crate is compiled and tested with the latest toolchain (rolling) as of the release date. -//! If you run into compilation errors, try the latest stable release of the rust toolchain. -//! -//! # Semantic Versioning -//! -//! Like the Rust project, this crate adheres to [SemVer]: breaking changes in the API and semantics -//! require a *semver bump* (since 1.0.0 a new major version release), with the exception of breaking changes -//! that fix soundness issues -- those are considered bug fixes and can be landed in a new patch -//! release. -//! -//! [SemVer]: https://semver.org/spec/v2.0.0.html - -#![deny(missing_docs)] -#![deny(rust_2021_compatibility)] -#![deny(rust_2018_compatibility)] -#![deny(rust_2018_idioms)] -#![no_std] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg", - html_favicon_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg" -)] -//deny_warnings_placeholder_for_ci -#![allow(clippy::inline_always)] - -use cortex_m::{interrupt::InterruptNumber, peripheral::NVIC}; -pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex}; -pub use rtic_macros::app; -pub use rtic_monotonic::{self, Monotonic}; - -/// module `mutex::prelude` provides `Mutex` and multi-lock variants. Recommended over `mutex_prelude` -pub mod mutex { - pub use rtic_core::prelude; - pub use rtic_core::Mutex; -} - -#[doc(hidden)] -pub mod export; - -/// Sets the given `interrupt` as pending -/// -/// This is a convenience function around -/// [`NVIC::pend`](../cortex_m/peripheral/struct.NVIC.html#method.pend) -pub fn pend<I>(interrupt: I) -where - I: InterruptNumber, -{ - NVIC::pend(interrupt); -} - -use core::cell::UnsafeCell; - -/// Internal replacement for `static mut T` -/// -/// Used to represent RTIC Resources -/// -/// Soundness: -/// 1) Unsafe API for internal use only -/// 2) ``get_mut(&self) -> *mut T`` -/// returns a raw mutable pointer to the inner T -/// casting to &mut T is under control of RTIC -/// RTIC ensures &mut T to be unique under Rust aliasing rules.
-/// -/// Implementation uses the underlying ``UnsafeCell<T>`` -/// self.0.get() -> *mut T -/// -/// 3) get(&self) -> *const T -/// returns a raw immutable (const) pointer to the inner T -/// casting to &T is under control of RTIC -/// RTIC ensures &T to be shared under Rust aliasing rules. -/// -/// Implementation uses the underlying ``UnsafeCell<T>`` -/// self.0.get() -> *mut T, demoted to *const T -/// -#[repr(transparent)] -pub struct RacyCell<T>(UnsafeCell<T>); - -impl<T> RacyCell<T> { - /// Create a ``RacyCell`` - #[inline(always)] - pub const fn new(value: T) -> Self { - RacyCell(UnsafeCell::new(value)) - } - - /// Get `*mut T` - /// - /// # Safety - /// - /// See documentation notes for [`RacyCell`] - #[inline(always)] - pub unsafe fn get_mut(&self) -> *mut T { - self.0.get() - } - - /// Get `*const T` - /// - /// # Safety - /// - /// See documentation notes for [`RacyCell`] - #[inline(always)] - pub unsafe fn get(&self) -> *const T { - self.0.get() - } -} - -unsafe impl<T> Sync for RacyCell<T> {} -- cgit v1.2.3
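The waker construction that the last patch deletes (and that lives on in rtic-monotonics/rtic) is the heart of the RTIC 2 dispatcher: the RawWaker's data pointer is not a pointer to any data, it is the `fn()` that pends the dispatcher interrupt. Below is a minimal host-side (std) sketch of that round trip, under the assumption that an atomic flag and a `while` loop can stand in for NVIC pending and the ISR; `pend_dispatcher` and `Countdown` are hypothetical names for illustration, not part of the RTIC API.

use std::{
    future::Future,
    mem,
    pin::Pin,
    sync::atomic::{AtomicBool, Ordering},
    task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};

static WAKER_VTABLE: RawWakerVTable =
    RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop);

unsafe fn waker_clone(p: *const ()) -> RawWaker {
    RawWaker::new(p, &WAKER_VTABLE)
}

unsafe fn waker_wake(p: *const ()) {
    // Recover the "pend the dispatcher" function from the data pointer and call it.
    let f: fn() = mem::transmute(p);
    f();
}

unsafe fn waker_drop(_: *const ()) {}

// Hypothetical stand-in for NVIC::pend(dispatcher_interrupt) on real hardware.
static PENDED: AtomicBool = AtomicBool::new(false);
fn pend_dispatcher() {
    PENDED.store(true, Ordering::Release);
}

// A nameable future: yields `self.0` times, re-waking itself on every yield.
struct Countdown(u8);
impl Future for Countdown {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.0 == 0 {
            Poll::Ready(())
        } else {
            self.0 -= 1;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

fn main() {
    let wake: fn() = pend_dispatcher;
    let mut task = Countdown(2);
    pend_dispatcher(); // spawning pends the dispatcher once

    // The "dispatcher": poll only while a waker has pended us, mirroring
    // the is_running() && check_and_clear_pending() guard above.
    while PENDED.swap(false, Ordering::AcqRel) {
        let waker = unsafe { Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)) };
        let mut cx = Context::from_waker(&waker);
        match Pin::new(&mut task).poll(&mut cx) {
            Poll::Ready(()) => println!("task done"),
            Poll::Pending => println!("task yielded; dispatcher pended again"),
        }
    }
}

Dropping the waker is a no-op precisely because the data pointer owns nothing; that is what keeps the `RawWakerVTable` sound without any allocation.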
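The deleted `export.rs` computes, at compile time, how many `u32` chunks a `Mask` needs and which bits each ceiling contributes, then raises the system ceiling by writing those bits to ICER/ISER. The following is a host-side sketch of just that arithmetic, with the NVIC enable word simulated by a plain `u32` (a single chunk, so M = 1); the interrupt ids and priorities are illustrative values, not taken from any RTIC application.

#[derive(Copy, Clone)]
struct Mask<const M: usize>([u32; M]);

impl<const M: usize> Mask<M> {
    // Set one interrupt's bit, as in the deleted set_bit.
    const fn set_bit(mut self, bit: u32) -> Self {
        let block = (bit / 32) as usize;
        assert!(block < M, "interrupt id out of range for this mask");
        self.0[block] |= 1 << (bit % 32);
        self
    }
}

// Chunks needed to cover the highest interrupt id in use: (max + 32) / 32.
const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max = 0;
    let mut i = 0;
    while i < L {
        if ids[i] as usize > max {
            max = ids[i] as usize;
        }
        i += 1;
    }
    (max + 32) / 32
}

// Logical RTIC priority -> hardware BASEPRI value (higher logical = lower hw).
const fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}

fn main() {
    // Two interrupts, ids 3 and 17, share a resource: one u32 chunk suffices.
    assert_eq!(compute_mask_chunks([3, 17]), 1);
    let mask = Mask([0u32; 1]).set_bit(3).set_bit(17);
    assert_eq!(mask.0[0], (1 << 3) | (1 << 17));

    // With 4 priority bits, logical priority 2 maps to BASEPRI 0xE0.
    assert_eq!(logical2hw(2, 4), 0xE0);

    // The no-BASEPRI lock pattern: clear the mask, run the closure, set it back.
    let mut enabled: u32 = u32::MAX; // simulated NVIC enable word (ISER state)
    enabled &= !mask.0[0]; // clear_enable_mask: disable contested interrupts
    let r = 40 + 2; // closure body runs with the ceiling raised
    enabled |= mask.0[0]; // set_enable_mask: re-enable them
    assert_eq!((r, enabled), (42, u32::MAX));
    println!("mask = {:#010x}", mask.0[0]);
}

The `ceiling >= 4` special case in the deleted lock fits this picture: on parts without BASEPRI (two NVIC priority bits, so logical ceilings 1 through 4), a ceiling of 4 is the maximum and degenerates to a global interrupt::free section instead of a computed mask.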
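Finally, the deleted `RacyCell` is a `Sync`-asserting wrapper over `UnsafeCell<T>` whose raw-pointer getters push the aliasing obligation onto the caller, which in RTIC is the generated resource-proxy code. A minimal sketch of the pattern, with a trivially single-threaded `main` standing in for the uniqueness guarantee that RTIC's codegen normally proves via ceiling analysis:

use core::cell::UnsafeCell;

#[repr(transparent)]
pub struct RacyCell<T>(UnsafeCell<T>);

// Sound only because every access goes through the unsafe getter below,
// whose callers must uphold Rust's aliasing rules.
unsafe impl<T> Sync for RacyCell<T> {}

impl<T> RacyCell<T> {
    pub const fn new(value: T) -> Self {
        RacyCell(UnsafeCell::new(value))
    }

    /// # Safety
    ///
    /// The caller must guarantee that any `&mut T` derived from the
    /// returned pointer is unique.
    pub unsafe fn get_mut(&self) -> *mut T {
        self.0.get()
    }
}

static COUNTER: RacyCell<u32> = RacyCell::new(0);

fn main() {
    // Here main() is trivially the only accessor; in RTIC the framework
    // guarantees exclusivity before handing out the &mut.
    unsafe { *COUNTER.get_mut() += 1 };
    println!("counter = {}", unsafe { *COUNTER.get_mut() });
}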