Diffstat (limited to 'src')
-rw-r--r--  src/export.rs           | 324
-rw-r--r--  src/export/executor.rs  | 110
-rw-r--r--  src/lib.rs              | 121
3 files changed, 0 insertions(+), 555 deletions(-)
diff --git a/src/export.rs b/src/export.rs
deleted file mode 100644
index cdca972..0000000
--- a/src/export.rs
+++ /dev/null
@@ -1,324 +0,0 @@
-pub use bare_metal::CriticalSection;
-pub use cortex_m::{
- asm::nop,
- asm::wfi,
- interrupt,
- peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
- Peripherals,
-};
-//pub use portable_atomic as atomic;
-pub use atomic_polyfill as atomic;
-
-pub mod executor;
-
-/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
-/// It needs to be large enough to cover all the relevant interrupts in use.
-/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
-/// For M23 there can be as many as 480 interrupts.
-/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in
-/// use at compile time and allocate enough u32 chunks to cover them.
-#[derive(Copy, Clone)]
-pub struct Mask<const M: usize>([u32; M]);
-
-impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
- fn bitor_assign(&mut self, rhs: Self) {
- for i in 0..M {
- self.0[i] |= rhs.0[i];
- }
- }
-}
-
-#[cfg(not(have_basepri))]
-impl<const M: usize> Mask<M> {
- /// Set a bit inside a Mask.
- const fn set_bit(mut self, bit: u32) -> Self {
- let block = bit / 32;
-
- if block as usize >= M {
- panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on an thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
- }
-
- let offset = bit - (block * 32);
- self.0[block as usize] |= 1 << offset;
- self
- }
-}
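A worked example of the bit placement above, as a standalone sketch that restates `set_bit` (illustrative, not the crate's exact code):

    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Mask<const M: usize>([u32; M]);

    impl<const M: usize> Mask<M> {
        const fn set_bit(mut self, bit: u32) -> Self {
            let block = (bit / 32) as usize; // which u32 chunk
            let offset = bit % 32;           // bit within that chunk
            self.0[block] |= 1 << offset;
            self
        }
    }

    fn main() {
        // Interrupt 41 lands in chunk 1 (41 / 32), bit 9 (41 % 32):
        assert_eq!(Mask([0u32; 2]).set_bit(41), Mask([0, 1 << 9]));
    }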
-
-#[cfg(have_basepri)]
-use cortex_m::register::basepri;
-
-#[cfg(have_basepri)]
-#[inline(always)]
-pub fn run<F>(priority: u8, f: F)
-where
- F: FnOnce(),
-{
- if priority == 1 {
- // If the priority of this interrupt is `1` then BASEPRI can only be `0`
- f();
- unsafe { basepri::write(0) }
- } else {
- let initial = basepri::read();
- f();
- unsafe { basepri::write(initial) }
- }
-}
-
-#[cfg(not(have_basepri))]
-#[inline(always)]
-pub fn run<F>(_priority: u8, f: F)
-where
- F: FnOnce(),
-{
- f();
-}
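For context, a hypothetical shape of what the `#[app]` macro might generate for a hardware task bound to an interrupt (the handler name `USART0` and the priority are illustrative placeholders, not actual RTIC output):

    // Illustrative sketch: the task body is wrapped in `run`, which on
    // BASEPRI targets restores BASEPRI after the closure returns.
    #[allow(non_snake_case)]
    #[no_mangle]
    unsafe extern "C" fn USART0() {
        const PRIORITY: u8 = 2; // hypothetical task priority
        rtic::export::run(PRIORITY, || {
            // user task body runs here
        });
    }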
-
-/// Const helper to check whether the target architecture has the BASEPRI register.
-pub const fn have_basepri() -> bool {
- #[cfg(have_basepri)]
- {
- true
- }
-
- #[cfg(not(have_basepri))]
- {
- false
- }
-}
-
-#[inline(always)]
-pub fn assert_send<T>()
-where
- T: Send,
-{
-}
-
-#[inline(always)]
-pub fn assert_sync<T>()
-where
- T: Sync,
-{
-}
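These helpers exist purely for compile-time checking: monomorphizing them with a type that lacks the bound fails to compile. A minimal sketch, assuming the two functions are in scope:

    fn main() {
        assert_send::<u32>(); // compiles: u32 is Send
        assert_sync::<u32>(); // compiles: u32 is Sync
        // assert_send::<*const u32>(); // would not compile: raw pointers are !Send
    }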
-
-/// Lock implementation using BASEPRI and global Critical Section (CS)
-///
-/// # Safety
-///
-/// The system ceiling is raised from the current priority to the ceiling
-/// by either
-/// - raising BASEPRI to the ceiling value, or
-/// - disabling all interrupts, when we need to mask
-/// interrupts of maximum priority.
-///
-/// The raw pointer is dereferenced inside the CS.
-///
-/// The priority.set/priority.get calls can safely be outside the CS,
-/// as the priority is a context-local cell (not affected by preemptions).
-/// They are merely used to omit masking when the current priority
-/// is already >= the ceiling.
-///
-/// Lock Efficiency:
-/// Experiments validate (sub)-zero cost for CS implementation
-/// (Sub)-zero as:
-/// - Either zero OH (lock optimized out), or
-/// - Amounting to an optimal assembly implementation
-/// - The BASEPRI value is folded to a constant at compile time
-/// - CS entry, single assembly instruction to write BASEPRI
-/// - CS exit, single assembly instruction to write BASEPRI
-/// - priority.set/get optimized out (their effect not)
-/// - On par or better than any handwritten implementation of SRP
-///
-/// Limitations:
-/// The current implementation reads/writes BASEPRI once
-/// even in some edge cases where this could be omitted.
-/// The total OH per task is at most 2 clock cycles, negligible in practice
-/// but in theory fixable.
-///
-#[cfg(have_basepri)]
-#[inline(always)]
-pub unsafe fn lock<T, R, const M: usize>(
- ptr: *mut T,
- ceiling: u8,
- nvic_prio_bits: u8,
- _mask: &[Mask<M>; 3],
- f: impl FnOnce(&mut T) -> R,
-) -> R {
- if ceiling == (1 << nvic_prio_bits) {
- let r = interrupt::free(|_| f(&mut *ptr));
- r
- } else {
- let current = basepri::read();
- basepri::write(logical2hw(ceiling, nvic_prio_bits));
- let r = f(&mut *ptr);
- basepri::write(current);
- r
- }
-}
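A minimal sketch of the call shape, assuming the items above are in scope; `SHARED` and `MASKS` are hypothetical stand-ins for storage that RTIC's codegen would create, and the ceiling/priority-bits values are illustrative:

    static mut SHARED: u32 = 0;
    static MASKS: [Mask<1>; 3] = [create_mask([]); 3]; // unused on BASEPRI targets

    unsafe fn increment() -> u32 {
        lock(
            core::ptr::addr_of_mut!(SHARED),
            3, // ceiling: highest priority among tasks sharing `SHARED`
            4, // NVIC_PRIO_BITS of the hypothetical device
            &MASKS,
            |shared| {
                *shared += 1;
                *shared
            },
        )
    }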
-
-/// Lock implementation using interrupt masking
-///
-/// # Safety
-///
-/// The system ceiling is raised from current to ceiling
-/// by computing a 32 bit `mask` (1 bit per interrupt)
-/// 1: ceiling >= priority > current
-/// 0: else
-///
-/// On CS entry, `clear_enable_mask(mask)` disables interrupts
-/// On CS exit, `set_enable_mask(mask)` re-enables interrupts
-///
-/// The priority.set/priority.get can safely be outside the CS
-/// as being a context local cell (not affected by preemptions).
-/// It is merely used in order to omit masking in case
-/// current priority >= ceiling.
-///
-/// Dereferencing a raw pointer is done safely inside the CS
-///
-/// Lock Efficiency:
-/// Early experiments validate (sub)-zero cost for CS implementation
-/// (Sub)-zero as:
-/// - Either zero OH (lock optimized out), or
-/// - Amounting to an optimal assembly implementation
-/// - if ceiling == (1 << nvic_prio_bits)
-/// - we execute the closure in a global critical section (interrupt free)
-/// - CS entry cost, single write to core register
-/// - CS exit cost, single write to core register
-/// else
-/// - The `mask` value is folded to a constant at compile time
-/// - CS entry, single write of the 32 bit `mask` to the `icer` register
-/// - CS exit, single write of the 32 bit `mask` to the `iser` register
-/// - priority.set/get optimized out (their effect not)
-/// - On par or better than any hand written implementation of SRP
-///
-/// Limitations:
-/// The current implementation does not allow tasks with shared resources
-/// to be bound to exception handlers, as these cannot be masked in HW.
-///
-/// Possible solutions:
-/// - Mask exceptions by global critical sections (interrupt::free)
-/// - Temporarily lower the exception priority
-///
-/// These possible solutions are set as goals for future work.
-#[cfg(not(have_basepri))]
-#[inline(always)]
-pub unsafe fn lock<T, R, const M: usize>(
- ptr: *mut T,
- ceiling: u8,
- _nvic_prio_bits: u8,
- masks: &[Mask<M>; 3],
- f: impl FnOnce(&mut T) -> R,
-) -> R {
- if ceiling >= 4 {
- // ceiling >= 4 is the maximum logical priority on these targets
- // (2 NVIC priority bits), so mask everything by executing the
- // closure inside a global critical section
- interrupt::free(|_| f(&mut *ptr))
- } else {
- // safe to manipulate outside critical section
- let mask = compute_mask(0, ceiling, masks);
- clear_enable_mask(mask);
-
- // execute closure under protection of raised system ceiling
- let r = f(&mut *ptr);
-
- set_enable_mask(mask);
-
- r
- }
-}
-
-#[cfg(not(have_basepri))]
-#[inline(always)]
-fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
- let mut res = Mask([0; M]);
- masks[from_prio as usize..to_prio as usize]
- .iter()
- .for_each(|m| res |= *m);
- res
-}
-
-// Re-enables the interrupts set in `mask` via the NVIC ISER registers
-#[cfg(not(have_basepri))]
-#[inline(always)]
-unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
- for i in 0..M {
- // This check should involve compile time constants and be optimized out.
- if mask.0[i] != 0 {
- (*NVIC::PTR).iser[i].write(mask.0[i]);
- }
- }
-}
-
-// Disables the interrupts set in `mask` via the NVIC ICER registers
-#[cfg(not(have_basepri))]
-#[inline(always)]
-unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
- for i in 0..M {
- // This check should involve compile time constants and be optimized out.
- if mask.0[i] != 0 {
- (*NVIC::PTR).icer[i].write(mask.0[i]);
- }
- }
-}
-
-#[inline]
-#[must_use]
-pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
- ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
-}
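A worked example of the mapping, restated standalone: RTIC's logical priorities grow upwards (1 = lowest), while Cortex-M hardware priorities grow downwards (0 = most urgent), so the value is inverted and shifted into the implemented high bits of the 8-bit priority field.

    fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
        ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
    }

    fn main() {
        // With 4 priority bits, logical priorities 1..=16 map onto the
        // upper nibble of the hardware priority byte:
        assert_eq!(logical2hw(1, 4), 0xF0);  // lowest urgency
        assert_eq!(logical2hw(16, 4), 0x00); // highest urgency
    }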
-
-#[cfg(have_basepri)]
-pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
- Mask([0; M])
-}
-
-#[cfg(not(have_basepri))]
-pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
- let mut mask = Mask([0; M]);
- let mut i = 0;
-
- while i < N {
- let shift = list_of_shifts[i];
- i += 1;
- mask = mask.set_bit(shift);
- }
-
- mask
-}
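A worked example on this path, assuming the definitions above: interrupt IDs 0, 3 and 34 spread across two chunks.

    // IDs 0 and 3 land in chunk 0; ID 34 lands in chunk 1, bit 2.
    const MASK: Mask<2> = create_mask([0, 3, 34]);
    // MASK.0[0] == 0b1001 (bits 0 and 3)
    // MASK.0[1] == 0b0100 (bit 34 - 32 == 2)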
-
-#[cfg(have_basepri)]
-pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
- 0
-}
-
-/// Compute the number of u32 chunks needed to store the Mask value.
-/// On M0, M0+ this should always end up being 1.
-/// On M23 we will pick a number that allows us to store the highest index used by the code.
-/// This means the amount of overhead will vary based on the actual interrupts used by the code.
-#[cfg(not(have_basepri))]
-pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
- let mut max: usize = 0;
- let mut i = 0;
-
- while i < L {
- let id = ids[i] as usize;
- i += 1;
-
- if id > max {
- max = id;
- }
- }
- (max + 32) / 32
-}
-
-#[cfg(have_basepri)]
-pub const fn no_basepri_panic() {
- // For non-v6 all is fine
-}
-
-#[cfg(not(have_basepri))]
-pub const fn no_basepri_panic() {
- panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
-}
diff --git a/src/export/executor.rs b/src/export/executor.rs
deleted file mode 100644
index e64cc43..0000000
--- a/src/export/executor.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-use super::atomic::{AtomicBool, Ordering};
-use core::{
- cell::UnsafeCell,
- future::Future,
- mem::{self, MaybeUninit},
- pin::Pin,
- task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
-};
-
-static WAKER_VTABLE: RawWakerVTable =
- RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop);
-
-unsafe fn waker_clone(p: *const ()) -> RawWaker {
- RawWaker::new(p, &WAKER_VTABLE)
-}
-
-unsafe fn waker_wake(p: *const ()) {
- // The only thing we need from a waker is the function to call to pend the async
- // dispatcher.
- let f: fn() = mem::transmute(p);
- f();
-}
-
-unsafe fn waker_drop(_: *const ()) {
- // nop
-}
-
-//============
-// AsyncTaskExecutor
-
-/// Executor for an async task.
-pub struct AsyncTaskExecutor<F: Future> {
- // `task` is protected by the `running` flag.
- task: UnsafeCell<MaybeUninit<F>>,
- running: AtomicBool,
- pending: AtomicBool,
-}
-
-unsafe impl<F: Future> Sync for AsyncTaskExecutor<F> {}
-
-impl<F: Future> AsyncTaskExecutor<F> {
- /// Create a new executor.
- #[inline(always)]
- pub const fn new() -> Self {
- Self {
- task: UnsafeCell::new(MaybeUninit::uninit()),
- running: AtomicBool::new(false),
- pending: AtomicBool::new(false),
- }
- }
-
- /// Check if there is an active task in the executor.
- #[inline(always)]
- pub fn is_running(&self) -> bool {
- self.running.load(Ordering::Relaxed)
- }
-
- /// Checks if a waker has pended the executor and simultaneously clears the flag.
- #[inline(always)]
- fn check_and_clear_pending(&self) -> bool {
- // Ordering::Acquire to enforce that update of task is visible to poll
- self.pending
- .compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed)
- .is_ok()
- }
-
- /// Used by wakers to indicate that the executor needs to run.
- #[inline(always)]
- pub fn set_pending(&self) {
- self.pending.store(true, Ordering::Release);
- }
-
- /// Spawn a future
- #[inline(always)]
- pub fn spawn(&self, future: impl Fn() -> F) -> bool {
- // Try to reserve the executor for a future.
- if self
- .running
- .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
- .is_ok()
- {
- // Sound because `running` was false and the compare-exchange set it
- // to true, giving us exclusive access to `task`.
- unsafe {
- self.task.get().write(MaybeUninit::new(future()));
- }
- self.set_pending();
-
- true
- } else {
- false
- }
- }
-
- /// Poll the future in the executor.
- #[inline(always)]
- pub fn poll(&self, wake: fn()) {
- if self.is_running() && self.check_and_clear_pending() {
- let waker = unsafe { Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE)) };
- let mut cx = Context::from_waker(&waker);
- let future = unsafe { Pin::new_unchecked(&mut *(self.task.get() as *mut F)) };
-
- match future.poll(&mut cx) {
- Poll::Ready(_) => {
- self.running.store(false, Ordering::Release);
- }
- Poll::Pending => {}
- }
- }
- }
-}
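To illustrate the executor lifecycle, a minimal host-side sketch assuming `AsyncTaskExecutor` is in scope; `pend_dispatcher` is a hypothetical stand-in for pending the dispatcher interrupt via the NVIC:

    use core::future::{ready, Ready};

    fn make_task() -> Ready<u32> {
        ready(42) // a future that completes on its first poll
    }

    fn pend_dispatcher() {
        // on hardware this would pend the async dispatcher interrupt
    }

    static EXECUTOR: AsyncTaskExecutor<Ready<u32>> = AsyncTaskExecutor::new();

    fn demo() {
        // `spawn` reserves the executor and marks it pending...
        assert!(EXECUTOR.spawn(make_task));
        // ...so the next `poll` runs the future; `Ready` completes
        // immediately and the executor frees itself for the next spawn.
        EXECUTOR.poll(pend_dispatcher);
        assert!(!EXECUTOR.is_running());
    }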
diff --git a/src/lib.rs b/src/lib.rs
deleted file mode 100644
index e8b8140..0000000
--- a/src/lib.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-//! Real-Time Interrupt-driven Concurrency (RTIC) framework for ARM Cortex-M microcontrollers.
-//!
-//! **IMPORTANT**: This crate is published as [`cortex-m-rtic`] on crates.io but the name of the
-//! library is `rtic`.
-//!
-//! [`cortex-m-rtic`]: https://crates.io/crates/cortex-m-rtic
-//!
-//! The user level documentation can be found [here].
-//!
-//! [here]: https://rtic.rs
-//!
-//! Don't forget to check the documentation of the `#[app]` attribute (listed under the reexports
-//! section), which is the main component of the framework.
-//!
-//! # Minimum Supported Rust Version (MSRV)
-//!
-//! This crate is compiled and tested with the latest toolchain (rolling) as of the release date.
-//! If you run into compilation errors, try the latest stable release of the Rust toolchain.
-//!
-//! # Semantic Versioning
-//!
-//! Like the Rust project, this crate adheres to [SemVer]: breaking changes in the API and
-//! semantics require a *semver bump* (since 1.0.0, a new major version release), with the
-//! exception of breaking changes that fix soundness issues -- those are considered bug fixes
-//! and can be landed in a new patch release.
-//!
-//! [SemVer]: https://semver.org/spec/v2.0.0.html
-
-#![deny(missing_docs)]
-#![deny(rust_2021_compatibility)]
-#![deny(rust_2018_compatibility)]
-#![deny(rust_2018_idioms)]
-#![no_std]
-#![doc(
- html_logo_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg",
- html_favicon_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg"
-)]
-//deny_warnings_placeholder_for_ci
-#![allow(clippy::inline_always)]
-
-use cortex_m::{interrupt::InterruptNumber, peripheral::NVIC};
-pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex};
-pub use rtic_macros::app;
-pub use rtic_monotonic::{self, Monotonic};
-
-/// Module `mutex::prelude` provides `Mutex` and multi-lock variants. Recommended over `mutex_prelude`.
-pub mod mutex {
- pub use rtic_core::prelude;
- pub use rtic_core::Mutex;
-}
-
-#[doc(hidden)]
-pub mod export;
-
-/// Sets the given `interrupt` as pending
-///
-/// This is a convenience function around
-/// [`NVIC::pend`](../cortex_m/peripheral/struct.NVIC.html#method.pend)
-pub fn pend<I>(interrupt: I)
-where
- I: InterruptNumber,
-{
- NVIC::pend(interrupt);
-}
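Hypothetical usage with a PAC-generated interrupt enum (`pac` and `Interrupt::USART0` are placeholders for whatever device crate is in use):

    use pac::Interrupt; // any type implementing `InterruptNumber` works

    fn trigger() {
        // Marks USART0 as pending in the NVIC; if it is unmasked and its
        // priority allows, the task bound to it preempts the current context.
        rtic::pend(Interrupt::USART0);
    }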
-
-use core::cell::UnsafeCell;
-
-/// Internal replacement for `static mut T`
-///
-/// Used to represent RTIC Resources
-///
-/// Soundness:
-/// 1) Unsafe API for internal use only.
-/// 2) ``get_mut(&self) -> *mut T`` returns a raw mutable pointer to the
-///    inner T. Casting to &mut T is under the control of RTIC, which
-///    ensures the &mut T is unique under Rust aliasing rules.
-///    Implementation uses the underlying ``UnsafeCell<T>``:
-///    self.0.get() -> *mut T
-/// 3) ``get(&self) -> *const T`` returns a raw immutable (const) pointer
-///    to the inner T. Casting to &T is under the control of RTIC, which
-///    ensures the &T is shared under Rust aliasing rules.
-///    Implementation uses the underlying ``UnsafeCell<T>``:
-///    self.0.get() -> *mut T, demoted to *const T
-///
-#[repr(transparent)]
-pub struct RacyCell<T>(UnsafeCell<T>);
-
-impl<T> RacyCell<T> {
- /// Create a ``RacyCell``
- #[inline(always)]
- pub const fn new(value: T) -> Self {
- RacyCell(UnsafeCell::new(value))
- }
-
- /// Get `*mut T`
- ///
- /// # Safety
- ///
- /// See documentation notes for [`RacyCell`]
- #[inline(always)]
- pub unsafe fn get_mut(&self) -> *mut T {
- self.0.get()
- }
-
- /// Get `*const T`
- ///
- /// # Safety
- ///
- /// See documentation notes for [`RacyCell`]
- #[inline(always)]
- pub unsafe fn get(&self) -> *const T {
- self.0.get()
- }
-}
-
-unsafe impl<T> Sync for RacyCell<T> {}