aboutsummaryrefslogtreecommitdiff
path: root/drivers/edma
diff options
context:
space:
mode:
authorIan McIntyre <me@mciantyre.dev>2025-11-30 18:52:34 -0500
committerIan McIntyre <me@mciantyre.dev>2025-11-30 19:10:51 -0500
commit76199f21616ad86cf68f3b063c1ce23c6fc5a52f (patch)
tree4c076d0afd649803a2bd9a5ed5cbb1f1c74fb459 /drivers/edma
First commit
Diffstat (limited to 'drivers/edma')
-rw-r--r--drivers/edma/Cargo.toml7
-rw-r--r--drivers/edma/src/dma.rs161
-rw-r--r--drivers/edma/src/dmamux.rs16
-rw-r--r--drivers/edma/src/lib.rs990
-rw-r--r--drivers/edma/src/tcd.rs174
5 files changed, 1348 insertions, 0 deletions
diff --git a/drivers/edma/Cargo.toml b/drivers/edma/Cargo.toml
new file mode 100644
index 0000000..dc9086c
--- /dev/null
+++ b/drivers/edma/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "imxrt-drivers-edma"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+ral-registers = { workspace = true }
diff --git a/drivers/edma/src/dma.rs b/drivers/edma/src/dma.rs
new file mode 100644
index 0000000..8f4cb9a
--- /dev/null
+++ b/drivers/edma/src/dma.rs
@@ -0,0 +1,161 @@
+//! DMA controllers.
+
+pub mod edma {
+ use crate::tcd::edma as tcd;
+
+ #[repr(C)]
+ #[allow(non_snake_case)]
+ pub struct RegisterBlock {
+ /// Control Register
+ pub CR: u32,
+ /// Error Status Register
+ pub ES: u32,
+ _reserved1: [u32; 1],
+ /// Enable Request Register
+ pub ERQ: u32,
+ _reserved2: [u32; 1],
+ /// Enable Error Interrupt Register
+ pub EEI: u32,
+ /// Clear Enable Error Interrupt Register
+ pub CEEI: u8,
+ /// Set Enable Error Interrupt Register
+ pub SEEI: u8,
+ /// Clear Enable Request Register
+ pub CERQ: u8,
+ /// Set Enable Request Register
+ pub SERQ: u8,
+ /// Clear DONE Status Bit Register
+ pub CDNE: u8,
+ /// Set START Bit Register
+ pub SSRT: u8,
+ /// Clear Error Register
+ pub CERR: u8,
+ /// Clear Interrupt Request Register
+ pub CINT: u8,
+ _reserved3: [u32; 1],
+ /// Interrupt Request Register
+ pub INT: u32,
+ _reserved4: [u32; 1],
+ /// Error Register
+ pub ERR: u32,
+ _reserved5: [u32; 1],
+ /// Hardware Request Status Register
+ pub HRS: u32,
+ _reserved6: [u32; 3],
+ /// Enable Asynchronous Request in Stop Register
+ pub EARS: u32,
+ _reserved7: [u32; 46],
+ /// Channel Priority Registers
+ pub DCHPRI: [u8; 32],
+ _reserved8: [u32; 952],
+ /// Transfer Control Descriptors
+ pub TCD: [tcd::RegisterBlock; 32],
+ }
+
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, DCHPRI) == 0x100);
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, TCD) == 0x1000);
+
+ /// Produce an index into `DCHPRI` for the given channel.
+ pub const fn dchpri_index(channel: usize) -> usize {
+ const X: [usize; 32] = [
+ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20,
+ 27, 26, 25, 24, 31, 30, 29, 28,
+ ];
+ X[channel]
+ }
+
+ ral_registers::register! {
+ pub SERQ<u8> WO []
+ }
+
+ ral_registers::register! {
+ pub CERQ<u8> WO []
+ }
+
+ ral_registers::register! {
+ pub HRS<u32> RO []
+ }
+
+ ral_registers::register! {
+ pub INT<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub CINT<u8> WO []
+ }
+
+ ral_registers::register! {
+ pub CDNE<u8> WO []
+ }
+
+ ral_registers::register! {
+ pub ERR<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub CERR<u8> WO []
+ }
+
+ ral_registers::register! {
+ pub ERQ<u32> RW []
+ }
+}
+
+pub mod edma3 {
+ use crate::tcd::edma34 as tcd;
+
+ #[repr(C)]
+ #[allow(non_snake_case)]
+ pub struct RegisterBlock {
+ pub CSR: u32,
+ pub ES: u32,
+ pub INT: u32,
+ pub HRS: u32,
+ _reserved0: [u8; 0x100 - 0x10],
+ pub GRPRI: [u32; 32],
+ _reserved1: [u8; 0x1_0000 - 0x180],
+ pub TCD: [tcd::RegisterBlock; 32],
+ }
+
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, GRPRI) == 0x100);
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, TCD) == 0x1_0000);
+
+ ral_registers::register! {
+ pub CSR<u32> RW [
+ GMRC start(7) width(1) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub HRS<u32> RO []
+ }
+
+ ral_registers::register! {
+ pub INT<u32> RO []
+ }
+}
+
+pub mod edma4 {
+ use crate::tcd::edma34 as tcd;
+
+ #[repr(C)]
+ #[allow(non_snake_case)]
+ pub struct RegisterBlock {
+ pub CSR: u32,
+ pub ES: u32,
+ pub INT_LOW: u32,
+ pub INT_HIGH: u32,
+ pub HRS_LOW: u32,
+ pub HRS_HIGH: u32,
+ _reserved0: [u8; 0x100 - 0x18],
+ pub GRPRI: [u32; 64],
+ _reserved1: [u8; 0x1_0000 - 0x200],
+ pub TCD: [tcd::RegisterBlock; 64],
+ }
+
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, GRPRI) == 0x100);
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, TCD) == 0x1_0000);
+
+ pub use super::edma3::CSR;
+ pub use super::edma3::{HRS as HRS_LOW, HRS as HRS_HIGH};
+}
diff --git a/drivers/edma/src/dmamux.rs b/drivers/edma/src/dmamux.rs
new file mode 100644
index 0000000..79f63e0
--- /dev/null
+++ b/drivers/edma/src/dmamux.rs
@@ -0,0 +1,16 @@
+//! DMA multiplexer.
+
+#[repr(C)]
+#[allow(non_snake_case)]
+pub struct RegisterBlock {
+ pub CHCFG: [u32; 32],
+}
+
+ral_registers::register! {
+ pub CHCFG<u32> RW [
+ ENBL start(31) width(1) RW {}
+ TRIG start(30) width(1) RW {}
+ A_ON start(29) width(1) RW {}
+ SOURCE start(0) width(8) RW {}
+ ]
+}
diff --git a/drivers/edma/src/lib.rs b/drivers/edma/src/lib.rs
new file mode 100644
index 0000000..7a23953
--- /dev/null
+++ b/drivers/edma/src/lib.rs
@@ -0,0 +1,990 @@
+//! DMA blocks for i.MX RT MCUs.
+//!
+//! eDMA works for all i.MX RT 1000 and 1100 series MCUs.
+//! The eDMA3 and eDMA4 are specifically targeted at the
+//! 1180 MCUs.
+
+#![no_std]
+
+pub mod dma;
+pub mod dmamux;
+pub mod tcd;
+
+pub mod element {
+ /// An ID for an element size.
+ #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ #[non_exhaustive]
+ #[repr(u8)]
+ pub enum ElementSize {
+ /// `u8` transfer.
+ U8 = 0,
+ /// `u16` transfer.
+ U16 = 1,
+ /// `u32` transfer.
+ U32 = 2,
+ /// `u64` transfer.
+ U64 = 3,
+ }
+
+ /// Describes a transferable DMA element; basically, an unsigned
+ /// integer of any size.
+ pub trait Element: Copy + private::Sealed {
+ /// An identifier describing the data transfer size
+ ///
+ /// Part of the TCD API; see documentation on TCD\[SSIZE\]
+ /// and TCD\[DSIZE\] for more information.
+ const DATA_TRANSFER_ID: ElementSize;
+ }
+
+ impl Element for u8 {
+ const DATA_TRANSFER_ID: ElementSize = ElementSize::U8;
+ }
+
+ impl Element for u16 {
+ const DATA_TRANSFER_ID: ElementSize = ElementSize::U16;
+ }
+
+ impl Element for u32 {
+ const DATA_TRANSFER_ID: ElementSize = ElementSize::U32;
+ }
+
+ impl Element for u64 {
+ const DATA_TRANSFER_ID: ElementSize = ElementSize::U64;
+ }
+
+ mod private {
+ pub trait Sealed {}
+
+ impl Sealed for u8 {}
+ impl Sealed for u16 {}
+ impl Sealed for u32 {}
+ impl Sealed for u64 {}
+ }
+}
+
+pub mod edma {
+ use core::num::NonZeroU8;
+
+ use crate::{dma::edma as dma, dmamux, element::ElementSize, tcd::edma as tcd};
+ use ral_registers::Instance;
+
+ /// A DMA channel for an eDMA controller.
+ ///
+ /// The channel is three pointers wide.
+ pub struct Channel {
+ index: usize,
+ dma: Instance<dma::RegisterBlock>,
+ mux: Instance<dmamux::RegisterBlock>,
+ }
+
+ impl Channel {
+ /// Create a new DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// The channel formed by this call cannot alias another channel with the
+ /// same index. The channel index must be valid for the hardware.
+ pub const unsafe fn new(
+ dma: Instance<dma::RegisterBlock>,
+ mux: Instance<dmamux::RegisterBlock>,
+ index: usize,
+ ) -> Self {
+ Self { index, dma, mux }
+ }
+
+ /// Returns the channel index.
+ pub fn index(&self) -> usize {
+ self.index
+ }
+
+ /// Enable the DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// If the channel is incorrectly configured, it may access
+ /// invalid memory.
+ pub unsafe fn enable(&mut self) {
+ ral_registers::write_reg!(dma, self.dma, SERQ, self.index as u8);
+ }
+
+ /// Disable the DMA channel.
+ pub fn disable(&mut self) {
+ ral_registers::write_reg!(dma, self.dma, CERQ, self.index as u8);
+ }
+
+ /// Returns `true` if the DMA channel is enabled.
+ pub fn is_enabled(&self) -> bool {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, ERQ) != 0
+ }
+
+ fn tcd(&self) -> Instance<tcd::RegisterBlock> {
+ // Safety: caller claims the index is valid for
+ // the DMA controller.
+ unsafe { Instance::new_unchecked(&raw mut (*self.dma.as_ptr()).TCD[self.index]) }
+ }
+
+ /// Returns `true` if the hardware is signaling requests towards
+ /// this DMA channel.
+ pub fn is_hardware_signaling(&self) -> bool {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, HRS) != 0
+ }
+
+ /// Returns `true` if an interrupt has activated for this
+ /// DMA channel.
+ pub fn is_interrupt(&self) -> bool {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, INT) != 0
+ }
+
+ /// Clear the interrupt trigger.
+ pub fn clear_interrupt(&self) {
+ ral_registers::write_reg!(dma, self.dma, CINT, self.index as u8);
+ }
+
+ /// Returns `true` if this DMA channel has finished its transfer.
+ pub fn is_done(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, DONE == 1)
+ }
+
+ /// Clear the "done" signal from software.
+ pub fn clear_done(&self) {
+ ral_registers::write_reg!(dma, self.dma, CDNE, self.index as u8);
+ }
+
+ /// Returns `true` if this channel has an error.
+ pub fn has_error(&self) -> bool {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, ERR) != 0
+ }
+
+ /// Clear this channel's error.
+ pub fn clear_error(&self) {
+ ral_registers::write_reg!(dma, self.dma, CERR, self.index as u8);
+ }
+
+ /// Returns `true` if this channel is actively transferring.
+ pub fn is_active(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, ACTIVE == 1)
+ }
+
+ /// Reset the control descriptor for the channel.
+ ///
+ /// Call this at least once, after acquiring the channel, to
+ /// put it into a known-good state.
+ pub fn reset(&mut self) {
+ unsafe { self.tcd().as_ptr().write_bytes(0, 1) };
+ }
+
+ /// Set the source address for a DMA transfer
+ ///
+ /// `saddr` should be a memory location that can provide the DMA controller
+ /// with data.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start reading this
+ /// memory location. You must ensure that reads to `saddr` do not perform
+ /// inappropriate side effects. You must ensure `saddr` is valid for the
+ /// lifetime of the transfer.
+ pub unsafe fn set_source_address(&self, saddr: *const ()) {
+ ral_registers::write_reg!(tcd, self.tcd(), SADDR, saddr as u32);
+ }
+
+ /// Set the source offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to read beyond a buffer or
+ /// address. You must ensure that the source is valid for these offsets.
+ pub unsafe fn set_source_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), SOFF, offset);
+ }
+
+ /// Set the destination address for a DMA transfer
+ ///
+ /// `daddr` should be a memory location that can store data from the
+ /// DMA controller.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start
+ /// writing to this address. You must ensure that writes to `daddr`
+ /// are safe, and that the memory is valid for the lifetime of the
+ /// transfer.
+ pub unsafe fn set_destination_address(&self, daddr: *const ()) {
+ ral_registers::write_reg!(tcd, self.tcd(), DADDR, daddr as u32);
+ }
+
+ /// Set the destination offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to write beyond the range of
+ /// a buffer. You must ensure that the destination is valid for these
+ /// offsets.
+ pub unsafe fn set_destination_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), DOFF, offset);
+ }
+
+ /// Set the transfer attributes.
+ ///
+ /// The attributes describe the ways the source is read and
+ /// the destination is written. The modulo mask allows the
+ /// buffer to be treated as a circular buffer.
+ ///
+ /// # Safety
+ ///
+ /// Incorrect sizes and modulos may cause invalid memory accesses.
+ pub unsafe fn set_attributes(
+ &self,
+ source_size: ElementSize,
+ source_modulo: u8,
+ destination_size: ElementSize,
+ destination_modulo: u8,
+ ) {
+ ral_registers::write_reg!(tcd, self.tcd(), ATTR,
+ SSIZE: source_size as u16,
+ SMOD: source_modulo as u16,
+ DSIZE: destination_size as u16,
+ DMOD: destination_modulo as u16,
+ );
+ }
+
+ /// Set the source last address adjustment *in bytes*
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid source buffer.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_source_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), SLAST, adjustment);
+ }
+
+ /// Set the destination last address adjustment *in bytes*
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid destination address.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_destination_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), DLAST_SGA, adjustment);
+ }
+
+ /// Set the number of *bytes* to transfer per minor loop
+ ///
+ /// Describes how many bytes we should transfer for each DMA service request.
+ /// Note that `nbytes` of `0` is interpreted as a 4GB transfer.
+ ///
+ /// # Safety
+ ///
+ /// This might allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of bytes per minor loop
+ /// is valid for the given transfer.
+ pub unsafe fn set_minor_loop_bytes(&self, nbytes: u32) {
+ ral_registers::write_reg!(tcd, self.tcd(), NBYTES, nbytes);
+ }
+
+ /// Tells the DMA channel how many transfer iterations to perform
+ ///
+ /// A 'transfer iteration' is a read from a source, and a write to a destination, with
+ /// read and write sizes described by a minor loop. Each iteration requires a DMA
+ /// service request, either from hardware or from software. The maximum number of iterations
+ /// is 2^15.
+ ///
+ /// # Safety
+ ///
+ /// This may allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of iterations is valid
+ /// for the transfer.
+ pub unsafe fn set_transfer_iterations(&mut self, iterations: u16) {
+ let iterations = iterations & 0x7FFF;
+ // Note that this is clearing the ELINK bit. We don't have support
+ // for channel-to-channel linking right now. Clearing ELINK is intentional
+ // to use the whole 15 bits for iterations.
+ ral_registers::write_reg!(tcd, self.tcd(), CITER, iterations);
+ ral_registers::write_reg!(tcd, self.tcd(), BITER, iterations);
+ }
+
+ /// Returns the beginning transfer iterations setting for the channel.
+ ///
+ /// This reflects the last call to `set_transfer_iterations`.
+ pub fn beginning_transfer_iterations(&self) -> u16 {
+ ral_registers::read_reg!(tcd, self.tcd(), BITER)
+ }
+
+ /// Enable or disable 'disable on completion'
+ ///
+ /// 'Disable on completion' lets the DMA channel automatically clear the request signal
+ /// when it completes a transfer.
+ pub fn set_disable_on_completion(&mut self, dreq: bool) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, DREQ: dreq as u16);
+ }
+
+ /// Enable or disable interrupt generation when the transfer completes
+ ///
+ /// You're responsible for registering your interrupt handler.
+ pub fn set_interrupt_on_completion(&mut self, intr: bool) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, INTMAJOR: intr as u16);
+ }
+
+ /// Start a DMA transfer
+ ///
+ /// `start()` should be used to request service from the DMA controller. It's
+ /// necessary for in-memory DMA transfers. Do not use it for hardware-initiated
+ /// DMA transfers. DMA transfers that involve hardware will rely on the hardware
+ /// to request DMA service.
+ ///
+ /// Flag is automatically cleared by hardware after it's asserted.
+ pub fn start(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, START: 1);
+ }
+
+ /// Set the source signal for channel activation.
+ ///
+ /// The source is typically an upstream peripheral.
+ /// Use `None` to clear the source.
+ ///
+ /// This call reads back the written source. If they're equal
+ /// the return is `true`. The values may not be equal if the
+ /// source is selected in another DMA channel.
+ pub fn set_mux_source_signal(&self, signal: Option<NonZeroU8>) -> bool {
+ let signal = signal.map_or(0, NonZeroU8::get) as u32;
+ ral_registers::modify_reg!(dmamux, self.mux, CHCFG[self.index], SOURCE: signal);
+ signal == ral_registers::read_reg!(dmamux, self.mux, CHCFG[self.index], SOURCE)
+ }
+
+ /// Enable or disable the DMA MUX source signaling / triggering.
+ ///
+ /// Make sure to set up a source signal before enabling.
+ pub fn set_mux_source_enable(&self, enable: bool) {
+ ral_registers::modify_reg!(dmamux, self.mux, CHCFG[self.index], ENBL: enable as u32);
+ }
+
+ /// Set periodic triggering for this DMA channel.
+ ///
+ /// Only the first four DMA channels can periodically trigger
+ /// on a PIT timer. Note that this call alone doesn't enable triggering.
+ ///
+ /// # Panics
+ ///
+ /// Panics if this isn't one of the first four DMA channels.
+ pub fn set_mux_periodic(&self, periodic: bool) {
+ assert!(self.index < 4);
+ ral_registers::modify_reg!(dmamux, self.mux, CHCFG[self.index], TRIG: periodic as u32);
+ }
+ }
+}
+
+pub mod edma3 {
+ use core::num::NonZeroU8;
+
+ use crate::{dma::edma3 as dma, element::ElementSize, tcd::edma34 as tcd};
+ use ral_registers::Instance;
+
+ /// A DMA channel for an eDMA controller.
+ ///
+ /// The channel is two pointers wide.
+ pub struct Channel {
+ index: usize,
+ dma: Instance<dma::RegisterBlock>,
+ }
+
+ impl Channel {
+ /// Create a new DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// The channel formed by this call cannot alias another channel with the
+ /// same index. The channel index must be valid for the hardware.
+ pub const unsafe fn new(dma: Instance<dma::RegisterBlock>, index: usize) -> Self {
+ Self { index, dma }
+ }
+
+ /// Enable the DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// If the channel is incorrectly configured, it may access
+ /// invalid memory.
+ pub unsafe fn enable(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, ERQ: 1, DONE: 0);
+ }
+
+ /// Disable the DMA channel.
+ pub fn disable(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, ERQ: 0, DONE: 0);
+ }
+
+ /// Returns `true` if the DMA channel is enabled.
+ pub fn is_enabled(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, ERQ != 0)
+ }
+
+ fn tcd(&self) -> Instance<tcd::RegisterBlock> {
+ // Safety: caller claims the index is valid for
+ // the DMA controller.
+ unsafe { Instance::new_unchecked(&raw mut (*self.dma.as_ptr()).TCD[self.index]) }
+ }
+
+ /// Returns `true` if the hardware is signaling requests towards
+ /// this DMA channel.
+ pub fn is_hardware_signaling(&self) -> bool {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, HRS) != 0
+ }
+
+ /// Returns `true` if an interrupt has activated for this
+ /// DMA channel.
+ pub fn is_interrupt(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), INT) != 0
+ }
+
+ /// Clear the interrupt trigger.
+ pub fn clear_interrupt(&self) {
+ ral_registers::write_reg!(tcd, self.tcd(), INT, 1);
+ }
+
+ /// Returns `true` if this DMA channel has finished its transfer.
+ pub fn is_done(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, DONE == 1)
+ }
+
+ /// Clear the "done" signal from software.
+ pub fn clear_done(&self) {
+ // Safety: the CSR register is the first register of the
+ // TCD. The peripheral tolerates 8-bit access. We can poke
+ // the DONE bit with a single write without risking a race
+ // in the lower bits of the register.
+ //
+ // I should rewrite the register block to lay out the data
+ // like I want...
+ unsafe {
+ let csr: *mut u32 = &raw mut (*self.tcd().as_ptr()).CSR;
+ csr.cast::<u8>().add(3).write_volatile(1 << 6)
+ };
+ }
+
+ /// Returns `true` if this channel has an error.
+ pub fn has_error(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), ES) != 0
+ }
+
+ /// Clear this channel's error.
+ pub fn clear_error(&self) {
+ ral_registers::write_reg!(tcd, self.tcd(), ES, ERR: 1);
+ }
+
+ /// Returns `true` if this channel is actively transferring.
+ pub fn is_active(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, ACTIVE == 1)
+ }
+
+ /// Reset the control descriptor for the channel.
+ ///
+ /// Call this at least once, after acquiring the channel, to
+ /// put it into a known-good state.
+ pub fn reset(&mut self) {
+ unsafe {
+ let tcd = &raw mut (*self.tcd().as_ptr()).TCD;
+ tcd.write_bytes(0, 1);
+ };
+ }
+
+ /// Set the source address for a DMA transfer
+ ///
+ /// `saddr` should be a memory location that can provide the DMA controller
+ /// with data.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start reading this
+ /// memory location. You must ensure that reads to `saddr` do not perform
+ /// inappropriate side effects. You must ensure `saddr` is valid for the
+ /// lifetime of the transfer.
+ pub unsafe fn set_source_address(&self, saddr: *const ()) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SADDR, saddr as u32);
+ }
+
+ /// Set the source offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to read beyond a buffer or
+ /// address. You must ensure that the source is valid for these offsets.
+ pub unsafe fn set_source_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SOFF, offset);
+ }
+
+ /// Set the destination address for a DMA transfer
+ ///
+ /// `daddr` should be a memory location that can store data from the
+ /// DMA controller.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start
+ /// writing to this address. You must ensure that writes to `daddr`
+ /// are safe, and that the memory is valid for the lifetime of the
+ /// transfer.
+ pub unsafe fn set_destination_address(&self, daddr: *const ()) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DADDR, daddr as u32);
+ }
+
+ /// Set the destination offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to write beyond the range of
+ /// a buffer. You must ensure that the destination is valid for these
+ /// offsets.
+ pub unsafe fn set_destination_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DOFF, offset);
+ }
+
+ /// Set the transfer attributes.
+ ///
+ /// The attributes describe the ways the source is read and
+ /// the destination is written. The modulo mask allows the
+ /// buffer to be treated as a circular buffer.
+ ///
+ /// # Safety
+ ///
+ /// Incorrect sizes and modulos may cause invalid memory accesses.
+ pub unsafe fn set_attributes(
+ &self,
+ source_size: ElementSize,
+ source_modulo: u8,
+ destination_size: ElementSize,
+ destination_modulo: u8,
+ ) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.ATTR,
+ SSIZE: source_size as u16,
+ SMOD: source_modulo as u16,
+ DSIZE: destination_size as u16,
+ DMOD: destination_modulo as u16,
+ );
+ }
+
+ /// Set the source last address adjustment *in bytes*
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid source buffer.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_source_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SLAST, adjustment);
+ }
+
+ /// Set the destination last address adjustment *in bytes*
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid destination address.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_destination_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DLAST_SGA, adjustment);
+ }
+
+ /// Set the number of *bytes* to transfer per minor loop
+ ///
+ /// Describes how many bytes we should transfer for each DMA service request.
+ /// Note that `nbytes` of `0` is interpreted as a 4GB transfer.
+ ///
+ /// # Safety
+ ///
+ /// This might allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of bytes per minor loop
+ /// is valid for the given transfer.
+ pub unsafe fn set_minor_loop_bytes(&self, nbytes: u32) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.NBYTES, nbytes);
+ }
+
+ /// Tells the DMA channel how many transfer iterations to perform
+ ///
+ /// A 'transfer iteration' is a read from a source, and a write to a destination, with
+ /// read and write sizes described by a minor loop. Each iteration requires a DMA
+ /// service request, either from hardware or from software. The maximum number of iterations
+ /// is 2^15.
+ ///
+ /// # Safety
+ ///
+ /// This may allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of iterations is valid
+ /// for the transfer.
+ pub unsafe fn set_transfer_iterations(&mut self, iterations: u16) {
+ let iterations = iterations & 0x7FFF;
+ // Note that this is clearing the ELINK bit. We don't have support
+ // for channel-to-channel linking right now. Clearing ELINK is intentional
+ // to use the whole 15 bits for iterations.
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.CITER, iterations);
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.BITER, iterations);
+ }
+
+ /// Returns the beginning transfer iterations setting for the channel.
+ ///
+ /// This reflects the last call to `set_transfer_iterations`.
+ pub fn beginning_transfer_iterations(&self) -> u16 {
+ ral_registers::read_reg!(tcd, self.tcd(), TCD.BITER)
+ }
+
+ /// Enable or disable 'disable on completion'
+ ///
+ /// 'Disable on completion' lets the DMA channel automatically clear the request signal
+ /// when it completes a transfer.
+ pub fn set_disable_on_completion(&mut self, dreq: bool) {
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, DREQ: dreq as u16);
+ }
+
+ /// Enable or disable interrupt generation when the transfer completes
+ ///
+ /// You're responsible for registering your interrupt handler.
+ pub fn set_interrupt_on_completion(&mut self, intr: bool) {
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, INTMAJOR: intr as u16);
+ }
+
+ /// Start a DMA transfer
+ ///
+ /// `start()` should be used to request service from the DMA controller. It's
+ /// necessary for in-memory DMA transfers. Do not use it for hardware-initiated
+ /// DMA transfers. DMA transfers that involve hardware will rely on the hardware
+ /// to request DMA service.
+ ///
+ /// Flag is automatically cleared by hardware after it's asserted.
+ pub fn start(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, START: 1);
+ }
+
+ /// Set the source signal for channel activation.
+ ///
+ /// The source is typically an upstream peripheral.
+ /// Use `None` to disable the source.
+ ///
+ /// This call reads back the written source. If they're equal
+ /// the return is `true`. The values may not be equal if the
+ /// source is selected in another DMA channel.
+ pub fn set_source_signal(&self, signal: Option<NonZeroU8>) -> bool {
+ let signal = signal.map_or(0, NonZeroU8::get) as u32;
+ ral_registers::write_reg!(tcd, self.tcd(), MUX, SRC: signal);
+ signal == ral_registers::read_reg!(tcd, self.tcd(), MUX, SRC)
+ }
+ }
+}
+
+pub mod edma4 {
+ use core::num::NonZeroU8;
+
+ use crate::{dma::edma4 as dma, element::ElementSize, tcd::edma34 as tcd};
+ use ral_registers::Instance;
+
+ /// A DMA channel for an eDMA controller.
+ ///
+ /// The channel is two pointers wide.
+ pub struct Channel {
+ index: usize,
+ dma: Instance<dma::RegisterBlock>,
+ }
+
+ impl Channel {
+ /// Create a new DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// The channel formed by this call cannot alias another channel with the
+ /// same index. The channel index must be valid for the hardware.
+ pub const unsafe fn new(dma: Instance<dma::RegisterBlock>, index: usize) -> Self {
+ Self { index, dma }
+ }
+
+ /// Enable the DMA channel.
+ ///
+ /// # Safety
+ ///
+ /// If the channel is incorrectly configured, it may access
+ /// invalid memory.
+ pub unsafe fn enable(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, ERQ: 1, DONE: 0);
+ }
+
+ /// Disable the DMA channel.
+ pub fn disable(&mut self) {
+ ral_registers::modify_reg!(tcd, self.tcd(), CSR, ERQ: 0, DONE: 0);
+ }
+
+ /// Returns `true` if the DMA channel is enabled.
+ pub fn is_enabled(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, ERQ != 0)
+ }
+
+ fn tcd(&self) -> Instance<tcd::RegisterBlock> {
+ // Safety: caller claims the index is valid for
+ // the DMA controller.
+ unsafe { Instance::new_unchecked(&raw mut (*self.dma.as_ptr()).TCD[self.index]) }
+ }
+
+ /// Returns `true` if the hardware is signaling requests towards
+ /// this DMA channel.
+ pub fn is_hardware_signaling(&self) -> bool {
+ if self.index < 32 {
+ (1 << self.index) & ral_registers::read_reg!(dma, self.dma, HRS_LOW) != 0
+ } else {
+ (1 << (self.index - 32)) & ral_registers::read_reg!(dma, self.dma, HRS_HIGH) != 0
+ }
+ }
+
+ /// Returns `true` if an interrupt has activated for this
+ /// DMA channel.
+ pub fn is_interrupt(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), INT) != 0
+ }
+
+ /// Clear the interrupt trigger.
+ pub fn clear_interrupt(&self) {
+ ral_registers::write_reg!(tcd, self.tcd(), INT, 1);
+ }
+
+ /// Returns `true` if this DMA channel has finished its transfer.
+ pub fn is_done(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, DONE == 1)
+ }
+
+ /// Clear the "done" signal from software.
+ pub fn clear_done(&self) {
+ // Safety: the CSR register is the first register of the
+ // TCD. The peripheral tolerates 8-bit access. We can poke
+ // the DONE bit with a single write without risking a race
+ // in the lower bits of the register.
+ //
+ // I should rewrite the register block to lay out the data
+ // like I want...
+ unsafe {
+ let csr: *mut u32 = &raw mut (*self.tcd().as_ptr()).CSR;
+ csr.cast::<u8>().add(3).write_volatile(1 << 6)
+ };
+ }
+
+ /// Returns `true` if this channel has an error.
+ pub fn has_error(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), ES) != 0
+ }
+
+ /// Clear this channel's error.
+ pub fn clear_error(&self) {
+ ral_registers::write_reg!(tcd, self.tcd(), ES, ERR: 1);
+ }
+
+ /// Returns `true` if this channel is actively transferring.
+ pub fn is_active(&self) -> bool {
+ ral_registers::read_reg!(tcd, self.tcd(), CSR, ACTIVE == 1)
+ }
+
+ /// Reset the control descriptor for the channel.
+ ///
+ /// Call this at least once, after acquiring the channel, to
+ /// put it into a known-good state.
+ pub fn reset(&mut self) {
+ unsafe {
+ let tcd = &raw mut (*self.tcd().as_ptr()).TCD;
+ tcd.write_bytes(0, 1);
+ };
+ }
+
+ /// Set the source address for a DMA transfer
+ ///
+ /// `saddr` should be a memory location that can provide the DMA controller
+ /// with data.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start reading this
+ /// memory location. You must ensure that reads to `saddr` do not perform
+ /// inappropriate side effects. You must ensure `saddr` is valid for the
+ /// lifetime of the transfer.
+ pub unsafe fn set_source_address(&self, saddr: *const ()) {
+ // The pointer truncates to 32 bits; assumes a 32-bit
+ // address space — TODO confirm for all supported targets.
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SADDR, saddr as u32);
+ }
+
+ /// Set the source offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ /// The hardware applies the signed offset to the source address
+ /// on each read.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to read beyond a buffer or
+ /// address. You must ensure that the source is valid for these offsets.
+ pub unsafe fn set_source_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SOFF, offset);
+ }
+
+ /// Set the destination address for a DMA transfer
+ ///
+ /// `daddr` should be a memory location that can store data from the
+ /// DMA controller.
+ ///
+ /// # Safety
+ ///
+ /// If the DMA channel is already enabled, the DMA engine may start
+ /// writing to this address. You must ensure that writes to `daddr`
+ /// are safe, and that the memory is valid for the lifetime of the
+ /// transfer.
+ pub unsafe fn set_destination_address(&self, daddr: *const ()) {
+ // NOTE(review): takes `*const` even though the engine writes
+ // here; `*mut` would better signal intent, but changing the
+ // signature would break callers. Pointer truncates to 32 bits.
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DADDR, daddr as u32);
+ }
+
+ /// Set the destination offset *in bytes*
+ ///
+ /// `offset` could be negative, which would decrement the address.
+ /// The hardware applies the signed offset to the destination
+ /// address on each write.
+ ///
+ /// # Safety
+ ///
+ /// This method could allow a DMA engine to write beyond the range of
+ /// a buffer. You must ensure that the destination is valid for these
+ /// offsets.
+ pub unsafe fn set_destination_offset(&self, offset: i16) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DOFF, offset);
+ }
+
+ /// Set the transfer attributes.
+ ///
+ /// The attributes describe the ways the source is read and
+ /// the destination is written. The modulo mask allows the
+ /// buffer to be treated as a circular buffer.
+ ///
+ /// Note the ATTR field widths: the size fields are 3 bits and
+ /// the modulo fields are 5 bits, so out-of-range modulo values
+ /// are truncated by the field write.
+ ///
+ /// # Safety
+ ///
+ /// Incorrect sizes and modulos may cause invalid memory accesses.
+ pub unsafe fn set_attributes(
+ &self,
+ source_size: ElementSize,
+ source_modulo: u8,
+ destination_size: ElementSize,
+ destination_modulo: u8,
+ ) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.ATTR,
+ SSIZE: source_size as u16,
+ SMOD: source_modulo as u16,
+ DSIZE: destination_size as u16,
+ DMOD: destination_modulo as u16,
+ );
+ }
+
+ /// Set the source last address adjustment *in bytes*
+ ///
+ /// The engine adds this signed value to the source address once
+ /// the major loop completes.
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid source buffer.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_source_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.SLAST, adjustment);
+ }
+
+ /// Set the destination last address adjustment *in bytes*
+ ///
+ /// The engine adds this signed value to the destination address
+ /// once the major loop completes.
+ ///
+ /// # Safety
+ ///
+ /// This could allow the DMA engine to reference an invalid destination address.
+ /// You must ensure that the adjustment performed by the DMA engine is
+ /// valid, assuming that another DMA transfer immediately runs after the
+ /// current transfer completes.
+ pub unsafe fn set_destination_last_address_adjustment(&self, adjustment: i32) {
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.DLAST_SGA, adjustment);
+ }
+
+ /// Set the number of *bytes* to transfer per minor loop
+ ///
+ /// Describes how many bytes we should transfer for each DMA service request.
+ /// Note that `nbytes` of `0` is interpreted as a 4GB transfer.
+ ///
+ /// # Safety
+ ///
+ /// This might allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of bytes per minor loop
+ /// is valid for the given transfer.
+ pub unsafe fn set_minor_loop_bytes(&self, nbytes: u32) {
+ // Full 32-bit write: minor-loop channel linking (which would
+ // steal the high bits of NBYTES) isn't supported here.
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.NBYTES, nbytes);
+ }
+
+ /// Tells the DMA channel how many transfer iterations to perform
+ ///
+ /// A 'transfer iteration' is a read from a source, and a write to a destination, with
+ /// read and write sizes described by a minor loop. Each iteration requires a DMA
+ /// service request, either from hardware or from software. The maximum number of
+ /// iterations is 2^15 - 1 (32,767); larger values are silently truncated by the
+ /// mask below.
+ ///
+ /// # Safety
+ ///
+ /// This may allow the DMA engine to read beyond the source, or write beyond
+ /// the destination. Caller must ensure that the number of iterations is valid
+ /// for the transfer.
+ pub unsafe fn set_transfer_iterations(&mut self, iterations: u16) {
+ let iterations = iterations & 0x7FFF;
+ // Note that this is clearing the ELINK bit. We don't have support
+ // for channel-to-channel linking right now. Clearing ELINK is intentional
+ // to use the whole 15 bits for iterations.
+ //
+ // CITER and BITER must match when a transfer starts; write both.
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.CITER, iterations);
+ ral_registers::write_reg!(tcd, self.tcd(), TCD.BITER, iterations);
+ }
+
+ /// Returns the beginning transfer iterations setting for the channel.
+ ///
+ /// This reflects the last call to `set_transfer_iterations`. Since
+ /// that setter clears ELINK, the raw BITER value is the iteration
+ /// count.
+ pub fn beginning_transfer_iterations(&self) -> u16 {
+ ral_registers::read_reg!(tcd, self.tcd(), TCD.BITER)
+ }
+
+ /// Enable or disable 'disable on completion'
+ ///
+ /// 'Disable on completion' lets the DMA channel automatically clear the request signal
+ /// when it completes a transfer.
+ pub fn set_disable_on_completion(&mut self, dreq: bool) {
+ // Read-modify-write on the TCD CSR; only DREQ changes.
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, DREQ: dreq as u16);
+ }
+
+ /// Enable or disable interrupt generation when the transfer completes
+ ///
+ /// You're responsible for registering your interrupt handler.
+ pub fn set_interrupt_on_completion(&mut self, intr: bool) {
+ // Read-modify-write on the TCD CSR; only INTMAJOR changes.
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, INTMAJOR: intr as u16);
+ }
+
+ /// Start a DMA transfer
+ ///
+ /// `start()` should be used to request service from the DMA controller. It's
+ /// necessary for in-memory DMA transfers. Do not use it for hardware-initiated
+ /// DMA transfers. DMA transfers that involve hardware will rely on the hardware
+ /// to request DMA service.
+ ///
+ /// Flag is automatically cleared by hardware after it's asserted.
+ pub fn start(&mut self) {
+ // Read-modify-write that sets only the START bit.
+ ral_registers::modify_reg!(tcd, self.tcd(), TCD.CSR, START: 1);
+ }
+
+ /// Set the source signal for channel activation.
+ ///
+ /// The source is typically an upstream peripheral.
+ /// Use `None` to disable the source.
+ ///
+ /// This call reads back the written source. If they're equal
+ /// the return is `true`. The values may not be equal if the
+ /// source is selected in another DMA channel.
+ ///
+ /// The MUX SRC field is 6 bits wide, so signal values above 63
+ /// are presumably truncated by the field write and the read-back
+ /// check would then fail — TODO confirm against the field macro.
+ pub fn set_source_signal(&self, signal: Option<NonZeroU8>) -> bool {
+ let signal = signal.map_or(0, NonZeroU8::get) as u32;
+ ral_registers::write_reg!(tcd, self.tcd(), MUX, SRC: signal);
+ signal == ral_registers::read_reg!(tcd, self.tcd(), MUX, SRC)
+ }
+ }
+}
diff --git a/drivers/edma/src/tcd.rs b/drivers/edma/src/tcd.rs
new file mode 100644
index 0000000..1e76533
--- /dev/null
+++ b/drivers/edma/src/tcd.rs
@@ -0,0 +1,174 @@
+//! Transfer control descriptor.
+
+/// Common TCD layout for all eDMA implementations.
+///
+/// Fields may vary. See inline notes.
+mod common {
+ #[repr(C, align(32))]
+ #[allow(non_snake_case)]
+ pub struct RegisterBlock {
+ /// Source address.
+ pub SADDR: u32,
+ // Signed numbers for offsets / 'last' members intentional.
+ // The hardware treats them as signed numbers.
+ /// Signed source address offset, in bytes, applied per read.
+ pub SOFF: i16,
+ /// Transfer attributes: source/destination transfer sizes
+ /// and address modulos.
+ pub ATTR: u16,
+ /// Bytes to transfer per minor loop.
+ pub NBYTES: u32,
+ /// Signed source last-address adjustment, in bytes, applied
+ /// after the major loop completes.
+ pub SLAST: i32,
+ /// Destination address.
+ pub DADDR: u32,
+ /// Signed destination address offset, in bytes, applied per
+ /// write.
+ pub DOFF: i16,
+ /// The minor loop channel link field may vary in size
+ /// depending on the implementation. Not worried right
+ /// now, since we don't support minor loop linking.
+ pub CITER: u16,
+ /// Signed destination last-address adjustment, in bytes
+ /// (the "SGA" scatter/gather use of this field is not
+ /// exercised by this driver).
+ pub DLAST_SGA: i32,
+ /// These fields vary across all of eDMA, eDMA3 and
+ /// eDMA4!
+ ///
+ /// Major loop channel linking field size changes as
+ /// a function of the number of DMA channels.
+ ///
+ /// eDMA and eDMA3 have bandwidth control. eDMA4 has
+ /// transfer mode control for read-only / write-only
+ /// DMA transfers. Field is the same size.
+ ///
+ /// In the low byte, high nibble, eDMA has DONE and
+ /// ACTIVE. eDMA3 and eDMA4 have things we probably don't
+ /// need. Low byte, low nibble is the same.
+ ///
+ /// So we'll need to change how we handle DONE and
+ /// ACTIVE access. They can't always dispatch to this
+ /// register.
+ pub CSR: u16,
+ /// See CITER documentation note about eDMA3 bitfield
+ /// size when minor loop channel linking is enabled.
+ pub BITER: u16,
+ }
+
+ // The TCD must be exactly 32 bytes: the hardware layout is
+ // fixed, and `reset` logic elsewhere relies on this size.
+ const _: () = assert!(32 == size_of::<RegisterBlock>());
+
+ ral_registers::register! {
+ pub SADDR<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub SOFF<i16> RW []
+ }
+
+ ral_registers::register! {
+ pub ATTR<u16> RW [
+ #[doc = "Destination data transfer size"]
+ DSIZE start(0) width(3) RW {}
+ #[doc = "Destination Address Modulo"]
+ DMOD start(3) width(5) RW {}
+ #[doc = "Source data transfer size"]
+ SSIZE start(8) width(3) RW {}
+ #[doc = "Source Address Modulo"]
+ SMOD start(11) width(5) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub NBYTES<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub SLAST<i32> RW []
+ }
+
+ ral_registers::register! {
+ pub DADDR<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub DOFF<i16> RW []
+ }
+
+ ral_registers::register! {
+ pub CITER<u16> RW []
+ }
+
+ ral_registers::register! {
+ pub DLAST_SGA<i32> RW []
+ }
+
+ ral_registers::register! {
+ pub CSR<u16> RW [
+ START start(0) width(1) RW {}
+ INTMAJOR start(1) width(1) RW {}
+ DREQ start(3) width(1) RW {}
+ ACTIVE start(6) width(1) RW {}
+ DONE start(7) width(1) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub BITER<u16> RW []
+ }
+}
+
+/// TCD registers and fields for the original eDMA, which uses
+/// the common layout directly.
+pub mod edma {
+ pub use super::common::*;
+}
+
+/// Per-channel register block and fields for eDMA3 / eDMA4.
+pub mod edma34 {
+ #[repr(C, align(32))]
+ #[allow(non_snake_case)]
+ pub struct RegisterBlock {
+ /// Channel control / status. Holds ERQ, and — unlike the
+ /// original eDMA — the ACTIVE and DONE flags.
+ pub CSR: u32,
+ /// Channel error status; ERR indicates an error.
+ pub ES: u32,
+ /// Channel interrupt status.
+ pub INT: u32,
+ /// Channel system-bus attributes (EMI / PAL / SEC bits).
+ pub SBR: u32,
+ /// Channel priority register; no bitfields modeled here.
+ pub PRI: u32,
+ /// Channel multiplexor; SRC selects the hardware service
+ /// request source.
+ pub MUX: u32,
+ /// Available on eDMA4, and reserved on eDMA3.
+ _mattr: u32,
+ /// The channel's transfer control descriptor, at offset 0x20.
+ pub TCD: super::common::RegisterBlock,
+ // Pads the block to 0x1_0000 bytes; both facts are pinned
+ // by the compile-time assertions below.
+ _reserved: [u8; 0x1_0000 - (4 * 8 + size_of::<super::common::RegisterBlock>())],
+ }
+
+ const _: () = assert!(core::mem::offset_of!(RegisterBlock, TCD) == 0x20);
+ const _: () = assert!(size_of::<RegisterBlock>() == 0x1_0000);
+
+ ral_registers::register! {
+ pub CSR<u32> RW [
+ ACTIVE start(31) width(1) RW {}
+ DONE start(30) width(1) RW {}
+ ERQ start(0) width(1) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub SBR<u32> RW [
+ EMI start(16) width(1) RW {}
+ PAL start(15) width(1) RW {}
+ SEC start(14) width(1) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub INT<u32> RW []
+ }
+
+ ral_registers::register! {
+ pub ES<u32> RW [
+ ERR start(31) width(1) RW {}
+ ]
+ }
+
+ ral_registers::register! {
+ pub MUX<u32> RW [
+ SRC start(0) width(6) RW {}
+ ]
+ }
+
+ /// TCD fields, re-exported from the common layout.
+ #[allow(non_snake_case)]
+ pub mod TCD {
+ pub use crate::tcd::common::{
+ ATTR, BITER, CITER, DADDR, DLAST_SGA, DOFF, NBYTES, SADDR, SLAST, SOFF,
+ };
+
+ /// Only the TCD CSR fields valid on eDMA3/4. ACTIVE and
+ /// DONE are intentionally omitted: on these implementations
+ /// they live in the channel CSR, not the TCD CSR (see the
+ /// note on the common CSR field).
+ pub mod CSR {
+ pub use crate::tcd::common::CSR::{DREQ, INTMAJOR, START, access};
+ }
+ }
+}