author     Ian McIntyre <ianpmcintyre@gmail.com>   2023-10-22 18:23:56 -0400
committer  Ian McIntyre <ianpmcintyre@gmail.com>   2023-11-26 15:11:27 -0500
commit     c7e5123f6604fbb9ca510f01af9b60e777bf57b4 (patch)
tree       3eea4d033a5b091d6d52da6d45f754b3be0de132 /src
First commit
A prototype of an i.MX RT ENET driver. There are design decisions I'm thinking of changing. Nevertheless, the smoltcp support seems to be working; an 1170EVK can act as a DHCP client and a TCP loopback server.
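
As a rough bring-up sketch (not from this commit): assuming `tx_ring` and `rx_ring` were taken from static buffers (see the allocation sketch after src/bd.rs below), and that the RAL instance, source clock frequency, and MAC address are placeholders for your board:

    // `enet_instance` and `SOURCE_CLOCK_HZ` are hypothetical; supply the
    // ENET RAL instance and module clock frequency for your chip.
    let mut enet = Enet::new(
        enet_instance,
        tx_ring,
        rx_ring,
        SOURCE_CLOCK_HZ,
        &[0x02, 0, 0, 0, 0, 1], // placeholder locally-administered MAC
    );
    enet.set_duplex(Duplex::Full);
    enet.enable_mac(true);
    // `Enet` implements smoltcp's `phy::Device`, so it can back an
    // `Interface` running the DHCP client and TCP server described above.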
Diffstat (limited to 'src')
-rw-r--r--   src/bd.rs        279
-rw-r--r--   src/bd/rxbd.rs    95
-rw-r--r--   src/bd/txbd.rs    91
-rw-r--r--   src/lib.rs       361
4 files changed, 826 insertions, 0 deletions
diff --git a/src/bd.rs b/src/bd.rs
new file mode 100644
index 0000000..1bfe9f7
--- /dev/null
+++ b/src/bd.rs
@@ -0,0 +1,279 @@
+//! Enhanced buffer descriptors.
+//!
+//! Buffer descriptors (BD) are defined with register access layer (RAL) compatibility.
+//! These definitions come from the i.MX RT 1170 reference manual, revision 2.
+
+macro_rules! bdfields {
+ ($name:ident, $type:ty $(, $field:ident [ offset = $offset:expr, bits = $bits:expr, $( $enum:ident = $val:expr $(,)? )* ] $(,)? )*) => {
+ pub mod $name {
+ $(
+ pub mod $field {
+ #![allow(non_snake_case, non_upper_case_globals, dead_code)]
+ // Assuming all fields are read-write.
+ pub mod R {}
+ pub mod W {}
+ pub mod RW {
+ $(
+ pub const $enum: $type = $val;
+ )*
+ }
+ pub const offset: $type = $offset;
+ pub const mask: $type = ((1 << $bits) - 1) << offset;
+ }
+ )*
+ }
+ };
+}
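+
+// For illustration: an invocation like
+// `bdfields!(flags, u16, wrap [ offset = 13, bits = 1, ])` expands to a
+// `flags::wrap` module where `offset == 13` and `mask == 0x2000`; the
+// `ral_registers` read/write/modify macros consume those constants.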
+
+pub(crate) mod rxbd;
+pub(crate) mod txbd;
+
+use core::{cell::UnsafeCell, mem::MaybeUninit, sync::atomic::Ordering};
+
+#[repr(align(64))]
+struct DescriptorRing<D, const N: usize>(UnsafeCell<MaybeUninit<[D; N]>>);
+unsafe impl<D, const N: usize> Sync for DescriptorRing<D, N> {}
+
+impl<D, const N: usize> DescriptorRing<D, N> {
+ const fn new() -> Self {
+ Self(UnsafeCell::new(MaybeUninit::uninit()))
+ }
+
+ /// # Safety
+ ///
+ /// Can only be called once. Multiple calls will release multiple mutable references
+ /// to the same memory.
+ unsafe fn init(&mut self) -> &mut [D] {
+ let ring: *mut MaybeUninit<[D; N]> = self.0.get();
+        // MaybeUninit<D> is layout-compatible with D, so we can treat
+        // each element as uninitialized.
+ let ring: *mut [MaybeUninit<D>; N] = ring.cast();
+ // Array pointer == pointer to first element.
+ let ring: *mut MaybeUninit<D> = ring.cast();
+
+ for descriptor in 0..N {
+ // Safety: D is either a TX or RX buffer descriptor. It's OK
+ // to initialize them to a bitpattern of zero. This pointer
+ // is valid for all descriptor offsets.
+ unsafe { ring.add(descriptor).write(MaybeUninit::zeroed()) };
+ }
+
+ // Safety: all descriptors are initialized to zero.
+ unsafe { core::slice::from_raw_parts_mut(ring.cast(), N) }
+ }
+}
+
+#[repr(align(64))]
+#[derive(Clone, Copy)]
+struct DataBuffer<const N: usize>([u8; N]);
+unsafe impl<const N: usize> Sync for DataBuffer<N> {}
+
+pub struct IoBuffers<D, const COUNT: usize, const MTU: usize> {
+ ring: DescriptorRing<D, COUNT>,
+ buffers: UnsafeCell<[DataBuffer<MTU>; COUNT]>,
+}
+unsafe impl<D, const COUNT: usize, const MTU: usize> Sync for IoBuffers<D, COUNT, MTU> {}
+
+pub type TransmitBuffers<const COUNT: usize, const MTU: usize> = IoBuffers<txbd::TxBD, COUNT, MTU>;
+pub type ReceiveBuffers<const COUNT: usize, const MTU: usize> = IoBuffers<rxbd::RxBD, COUNT, MTU>;
+
+impl<D, const COUNT: usize, const MTU: usize> IoBuffers<D, COUNT, MTU> {
+ const MTU_IS_MULTIPLE_OF_16: () = assert!(MTU % 16 == 0);
+
+ pub const fn new() -> Self {
+ #[allow(clippy::let_unit_value)] // Force evaluation.
+ let _: () = Self::MTU_IS_MULTIPLE_OF_16;
+ Self {
+ ring: DescriptorRing::new(),
+ buffers: UnsafeCell::new([DataBuffer([0; MTU]); COUNT]),
+ }
+ }
+
+ fn init(
+ &'static mut self,
+ init_descriptors: impl Fn(&mut [D], &mut [DataBuffer<MTU>]),
+ ) -> IoSlices<D> {
+        // Safety: by taking a &'static mut reference, we
+        // ensure that we can only be called once.
+ let ring = unsafe { self.ring.init() };
+ // Safety: since this is only called once, we're taking the only
+ // mutable reference available to the program.
+ let buffers = unsafe { &mut *self.buffers.get() };
+ let buffers = buffers.as_mut_slice();
+ init_descriptors(ring, buffers);
+ IoSlices::new(ring, MTU)
+ }
+}
+
+impl<const COUNT: usize, const MTU: usize> IoBuffers<txbd::TxBD, COUNT, MTU> {
+ pub fn take(&'static mut self) -> IoSlices<'static, txbd::TxBD> {
+ self.init(|descriptors, buffers| {
+ for (descriptor, buffer) in descriptors.iter_mut().zip(buffers.iter_mut()) {
+ ral_registers::write_reg!(
+ txbd,
+ descriptor,
+ data_buffer_pointer,
+ buffer.0.as_mut_ptr() as _
+ );
+ }
+
+ // When the DMA engine reaches this descriptor, it needs to wrap
+ // around to the first descriptor.
+ if let Some(descriptor) = descriptors.last_mut() {
+ ral_registers::modify_reg!(txbd, descriptor, flags, wrap: 1);
+ }
+ })
+ }
+}
+
+impl<const COUNT: usize, const MTU: usize> IoBuffers<rxbd::RxBD, COUNT, MTU> {
+ pub fn take(&'static mut self) -> IoSlices<'static, rxbd::RxBD> {
+ self.init(|descriptors, buffers| {
+ for (descriptor, buffer) in descriptors.iter_mut().zip(buffers.iter_mut()) {
+ ral_registers::write_reg!(
+ rxbd,
+ descriptor,
+ data_buffer_pointer,
+ buffer.0.as_mut_ptr() as _
+ );
+ // Zero all other flags.
+ ral_registers::write_reg!(rxbd, descriptor, flags, empty: 1);
+ }
+
+ // When the DMA engine reaches this descriptor, it needs to wrap
+ // around to the first descriptor.
+ if let Some(descriptor) = descriptors.last_mut() {
+ ral_registers::modify_reg!(rxbd, descriptor, flags, wrap: 1);
+ }
+ })
+ }
+}
+
+pub struct IoSlices<'a, D> {
+ ring: &'a mut [D],
+ mtu: usize,
+ index: usize,
+}
+
+pub type ReceiveSlices<'a> = IoSlices<'a, rxbd::RxBD>;
+pub type TransmitSlices<'a> = IoSlices<'a, txbd::TxBD>;
+
+impl<'a, D> IoSlices<'a, D> {
+ fn new(ring: &'a mut [D], mtu: usize) -> Self {
+ Self {
+ ring,
+ mtu,
+ index: 0,
+ }
+ }
+ pub(crate) fn as_ptr(&self) -> *const D {
+ self.ring.as_ptr()
+ }
+ pub(crate) fn mtu(&self) -> usize {
+ self.mtu
+ }
+}
+
+impl<D> IoSlices<'_, D> {
+ fn next_impl<'a, R: 'a>(
+ &'a mut self,
+ check: impl FnOnce(&D) -> bool,
+ ready: R,
+ ) -> Option<IoToken<'a, D, R>> {
+ let next = (self.index + 1) % self.ring.len();
+ let descriptor = self.ring.get_mut(self.index).unwrap();
+ if check(descriptor) {
+ Some(IoToken {
+ descriptor,
+ index: &mut self.index,
+ next,
+ mtu: self.mtu,
+ ready,
+ })
+ } else {
+ None
+ }
+ }
+}
+
+pub struct IoToken<'a, D, R> {
+ descriptor: &'a mut D,
+ index: &'a mut usize,
+ next: usize,
+ mtu: usize,
+ ready: R,
+}
+
+pub type TxToken<'a> = IoToken<'a, txbd::TxBD, crate::TxReady<'a>>;
+pub type RxToken<'a> = IoToken<'a, rxbd::RxBD, crate::RxReady<'a>>;
+
+impl ReceiveSlices<'_> {
+ pub(crate) fn next_token<'a>(&'a mut self, ready: crate::RxReady<'a>) -> Option<RxToken<'a>> {
+ self.next_impl(
+ |rxbd| ral_registers::read_reg!(rxbd, rxbd, flags, empty == 0),
+ ready,
+ )
+ }
+}
+
+impl TransmitSlices<'_> {
+ pub(crate) fn next_token<'a>(&'a mut self, ready: crate::TxReady<'a>) -> Option<TxToken<'a>> {
+ self.next_impl(
+ |txbd| ral_registers::read_reg!(txbd, txbd, flags, ready == 0),
+ ready,
+ )
+ }
+}
+
+impl smoltcp::phy::TxToken for TxToken<'_> {
+ fn consume<R, F>(self, len: usize, f: F) -> R
+ where
+ F: FnOnce(&mut [u8]) -> R,
+ {
+ // Safety: we ensure that smoltcp isn't exceeding the size of the buffer.
+ // We know that the pointer is valid. Module inspection reveals that this is the
+ // only mutable reference to the pointer; it's tracked through the descriptor
+ // lifetimes.
+ let buffer = unsafe {
+ assert!(len <= self.mtu);
+ let ptr =
+ ral_registers::read_reg!(txbd, self.descriptor, data_buffer_pointer) as *mut u8;
+ core::slice::from_raw_parts_mut(ptr, len)
+ };
+
+ let result = f(buffer);
+ core::sync::atomic::fence(Ordering::SeqCst);
+
+ ral_registers::write_reg!(txbd, self.descriptor, data_length, len as _);
+ ral_registers::modify_reg!(txbd, self.descriptor, flags,
+ ready: 1,
+ last_in: 1,
+ transmit_crc: 1,
+ );
+ self.ready.consume();
+ *self.index = self.next;
+ result
+ }
+}
+
+impl smoltcp::phy::RxToken for RxToken<'_> {
+ fn consume<R, F>(self, f: F) -> R
+ where
+ F: FnOnce(&mut [u8]) -> R,
+ {
+ // Safety: hardware will not exceed our maximum frame length. We know that
+ // the pointer is valid; see discussion above.
+ let buffer = unsafe {
+ let len = ral_registers::read_reg!(rxbd, self.descriptor, data_length) as usize;
+ assert!(len <= self.mtu);
+ let ptr =
+ ral_registers::read_reg!(rxbd, self.descriptor, data_buffer_pointer) as *mut u8;
+ core::slice::from_raw_parts_mut(ptr, len)
+ };
+
+ let result = f(buffer);
+ ral_registers::modify_reg!(rxbd, self.descriptor, flags, empty: 1);
+ self.ready.consume();
+ *self.index = self.next;
+ result
+ }
+}
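
A minimal allocation sketch (not from this commit) for the types above. The descriptor count and MTU are arbitrary choices; the MTU must be a non-zero multiple of 16, which `IoBuffers::new` asserts at compile time. The `static mut` access assumes this runs exactly once; a safe singleton wrapper would also work:

    static mut TX_BUFFERS: TransmitBuffers<8, 1520> = TransmitBuffers::new();
    static mut RX_BUFFERS: ReceiveBuffers<8, 1520> = ReceiveBuffers::new();

    // Safety: executed once, so each `take()` observes the only
    // &'static mut reference to its buffers.
    let tx_ring = unsafe { TX_BUFFERS.take() };
    let rx_ring = unsafe { RX_BUFFERS.take() };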
diff --git a/src/bd/rxbd.rs b/src/bd/rxbd.rs
new file mode 100644
index 0000000..8523d6e
--- /dev/null
+++ b/src/bd/rxbd.rs
@@ -0,0 +1,95 @@
+//! Enhanced receive buffer descriptor layout and fields.
+
+use ral_registers::{RORegister, RWRegister};
+
+#[repr(C)]
+pub struct RxBD {
+ pub data_length: RORegister<u16>,
+ pub flags: RWRegister<u16>,
+ pub data_buffer_pointer: RWRegister<u32>,
+ pub status: RWRegister<u16>,
+ pub control: RWRegister<u16>,
+ pub checksum: RORegister<u16>,
+ pub header: RORegister<u16>,
+ _reserved0: [u16; 1],
+ pub last_bdu: RWRegister<u16>,
+ pub timestamp_1588: RORegister<u32>,
+ _reserved1: [u16; 4],
+}
+
+bdfields!(flags, u16,
+ empty [ offset = 15, bits = 1, ],
+ ro1 [ offset = 14, bits = 1, ],
+ wrap [ offset = 13, bits = 1, ],
+ ro2 [ offset = 12, bits = 1, ],
+ last [ offset = 11, bits = 1, ],
+
+ miss [ offset = 8, bits = 1, ],
+ broadcast [ offset = 7, bits = 1, ],
+ multicast [ offset = 6, bits = 1, ],
+ length_violation [ offset = 5, bits = 1, ],
+ non_octet_violation [ offset = 4, bits = 1, ],
+
+ crc_error [ offset = 2, bits = 1, ],
+ overrun [ offset = 1, bits = 1, ],
+ truncated [ offset = 0, bits = 1, ],
+);
+
+bdfields!(status, u16,
+ vlan_priority [ offset = 13, bits = 3, ],
+ ip_checksum_error [ offset = 5, bits = 1, ],
+ protocol_checksum_error [ offset = 4, bits = 1, ],
+ vlan [ offset = 2, bits = 1, ],
+ ipv6 [ offset = 1, bits = 1, ],
+ frag [ offset = 0, bits = 1, ],
+);
+
+bdfields!(control, u16,
+ mac_error [ offset = 15, bits = 1, ],
+ phy_error [ offset = 10, bits = 1, ],
+ collision [ offset = 9, bits = 1, ],
+ unicast [ offset = 8, bits = 1, ],
+ interrupt [ offset = 7, bits = 1, ],
+);
+
+bdfields!(header, u16,
+ length [ offset = 11, bits = 5, ],
+ protocol [ offset = 0, bits = 8, ],
+);
+
+bdfields!(last_bdu, u16,
+ last_bdu [ offset = 15, bits = 1, ],
+);
+
+#[cfg(test)]
+mod tests {
+ use core::ptr::addr_of;
+
+ use super::RxBD;
+
+ fn zeroed() -> RxBD {
+ // Safety: zero bitpattern is fine for primitive fields.
+ unsafe { core::mem::MaybeUninit::zeroed().assume_init() }
+ }
+
+ #[test]
+ fn field_offsets() {
+ let rxbd = zeroed();
+ let start = &rxbd as *const _ as *const u8;
+ assert_eq!(unsafe { start.add(0x0) }, addr_of!(rxbd.data_length).cast());
+ assert_eq!(unsafe { start.add(0x2) }, addr_of!(rxbd.flags).cast());
+ assert_eq!(
+ unsafe { start.add(0x4) },
+ addr_of!(rxbd.data_buffer_pointer).cast()
+ );
+ assert_eq!(unsafe { start.add(0x8) }, addr_of!(rxbd.status).cast());
+ assert_eq!(unsafe { start.add(0xA) }, addr_of!(rxbd.control).cast());
+ assert_eq!(unsafe { start.add(0xC) }, addr_of!(rxbd.checksum).cast());
+ assert_eq!(unsafe { start.add(0xE) }, addr_of!(rxbd.header).cast());
+ assert_eq!(unsafe { start.add(0x12) }, addr_of!(rxbd.last_bdu).cast());
+ assert_eq!(
+ unsafe { start.add(0x14) },
+ addr_of!(rxbd.timestamp_1588).cast()
+ );
+ }
+}
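
As an aside (not from this commit): the constants generated by `bdfields!` also support manual field arithmetic. A hypothetical check of the `empty` bit, equivalent to what `read_reg!` expands to (`descriptor` here is an assumed `&RxBD`):

    use rxbd::flags;

    // The empty bit is set while the hardware owns the descriptor.
    let bits: u16 = descriptor.flags.read();
    let hardware_owns_it = (bits & flags::empty::mask) >> flags::empty::offset == 1;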
diff --git a/src/bd/txbd.rs b/src/bd/txbd.rs
new file mode 100644
index 0000000..9550eff
--- /dev/null
+++ b/src/bd/txbd.rs
@@ -0,0 +1,91 @@
+//! Enhanced transmit buffer descriptor layout and fields.
+
+use ral_registers::RWRegister;
+
+#[repr(C)]
+pub struct TxBD {
+ pub data_length: RWRegister<u16>,
+ pub flags: RWRegister<u16>,
+ pub data_buffer_pointer: RWRegister<u32>,
+ pub errors: RWRegister<u16>,
+ pub control: RWRegister<u16>,
+ pub launch_time: RWRegister<u32>,
+ _reserved0: [u16; 1],
+ pub last_bdu: RWRegister<u16>,
+ pub timestamp_1588: RWRegister<u32>,
+ _reserved1: [u16; 4],
+}
+
+bdfields!(flags, u16,
+ ready [ offset = 15, bits = 1, ],
+ to1 [ offset = 14, bits = 1, ],
+ wrap [ offset = 13, bits = 1, ],
+ to2 [ offset = 12, bits = 1, ],
+ last_in [ offset = 11, bits = 1, ],
+ transmit_crc [ offset = 10, bits = 1, ],
+);
+
+bdfields!(errors, u16,
+ transmit [ offset = 15, bits = 1, ],
+ underflow [ offset = 13, bits = 1, ],
+ excess_collision [ offset = 12, bits = 1, ],
+ frame_error [ offset = 11, bits = 1, ],
+ late_collision [ offset = 10, bits = 1, ],
+ overflow [ offset = 9, bits = 1, ],
+ timestamp [ offset = 8, bits = 1, ],
+);
+
+bdfields!(control, u16,
+ interrupt [ offset = 14, bits = 1, ],
+ timestamp [ offset = 13, bits = 1, ],
+ pins [ offset = 12, bits = 1, ],
+ iins [ offset = 11, bits = 1, ],
+ utlt [ offset = 8, bits = 1, ],
+ ftype [ offset = 4, bits = 4, NON_AVB = 0, AVB_A = 1, AVB_B = 2 ],
+);
+
+bdfields!(last_bdu, u16,
+ last_bdu [ offset = 15, bits = 1, ],
+);
+
+#[cfg(test)]
+mod tests {
+ use super::TxBD;
+ use std::ptr::addr_of;
+
+ fn zeroed() -> TxBD {
+ // Safety: zero bitpattern is fine for primitive fields.
+ unsafe { core::mem::MaybeUninit::zeroed().assume_init() }
+ }
+
+ #[test]
+ fn field_offsets() {
+ let txbd = zeroed();
+ let start = &txbd as *const _ as *const u8;
+ assert_eq!(unsafe { start.add(0x0) }, addr_of!(txbd.data_length).cast());
+ assert_eq!(unsafe { start.add(0x2) }, addr_of!(txbd.flags).cast());
+ assert_eq!(
+ unsafe { start.add(0x4) },
+ addr_of!(txbd.data_buffer_pointer).cast()
+ );
+ assert_eq!(unsafe { start.add(0x8) }, addr_of!(txbd.errors).cast());
+ assert_eq!(unsafe { start.add(0xA) }, addr_of!(txbd.control).cast());
+ assert_eq!(unsafe { start.add(0xC) }, addr_of!(txbd.launch_time).cast());
+ assert_eq!(unsafe { start.add(0x12) }, addr_of!(txbd.last_bdu).cast());
+ assert_eq!(
+ unsafe { start.add(0x14) },
+ addr_of!(txbd.timestamp_1588).cast()
+ );
+ }
+
+ #[test]
+ fn bdfields_enum() {
+ let txbd = zeroed();
+ ral_registers::modify_reg!(super, &txbd, control, ftype: AVB_B);
+ assert_eq!(txbd.control.read(), 0x2 << 4);
+ ral_registers::modify_reg!(super, &txbd, control, interrupt: 1);
+ assert_eq!(txbd.control.read(), 0x2 << 4 | 1 << 14);
+ ral_registers::modify_reg!(super, &txbd, control, ftype: 0);
+ assert_eq!(txbd.control.read(), 1 << 14);
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..816d944
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,361 @@
+//! Ethernet driver for i.MX RT MCUs.
+
+#![cfg_attr(all(target_arch = "arm", target_os = "none"), no_std)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+mod bd;
+
+pub use bd::{IoBuffers, IoSlices, ReceiveBuffers, ReceiveSlices, TransmitBuffers, TransmitSlices};
+use imxrt_ral as ral;
+
+pub use mdio::miim::{Read as MiimRead, Write as MiimWrite};
+pub use smoltcp;
+
+/// Allows independent transmit and receive functions.
+#[derive(Debug, Clone, Copy, defmt::Format)]
+pub enum Duplex {
+ /// Transmit and receive functions cannot overlap.
+ ///
+ /// Specifically, you cannot transmit frames while you're receiving frames.
+ /// Similarly, you cannot receive frames while you're sending frames.
+ Half,
+ /// The MAC can transmit and receive simultaneously.
+ ///
+    /// Specifically, the receive path operates independently of the transmit
+ /// path. You can transmit frames without concern for carrier sense and
+ /// collision signals.
+ Full,
+}
+
+/// Ethernet MAC and related functions.
+///
+/// The MDIO interface is always enabled. To generally use the MDIO interface,
+/// use [`MiimRead`] and [`MiimWrite`]. Once your driver is configured, use
+/// [`enable_mac`](Enet::enable_mac) to enable the transmit and receive datapaths.
+///
+/// The MAC implements the `phy` interfaces from [`smoltcp`]. The driver optimizes
+/// for hardware-based checksumming as much as possible, but this only applies to
+/// the network and transport layers.
+pub struct Enet<const N: u8> {
+ enet: ral::enet::Instance<N>,
+ tx_ring: TransmitSlices<'static>,
+ rx_ring: ReceiveSlices<'static>,
+}
+
+impl<const N: u8> Enet<N> {
+ pub fn new(
+ enet: ral::enet::Instance<N>,
+ tx_ring: TransmitSlices<'static>,
+ rx_ring: ReceiveSlices<'static>,
+ source_clock_hz: u32,
+ mac: &[u8; 6],
+ ) -> Self {
+ // Reset the module.
+ ral::modify_reg!(ral::enet, enet, ECR, RESET: 1);
+
+ ral::modify_reg!(ral::enet, enet, ECR,
+ DBSWP: 1, // Swap data for this little endian device.
+ EN1588: 1, // Use enhanced buffer descriptors.
+ RESET: 0, // I think this auto-clears, but just in case...
+ DBGEN: 0, // Keep running the MAC in debug mode.
+ );
+
+ // Turn off all interrupts.
+ ral::write_reg!(ral::enet, enet, EIMR, 0);
+
+        // The receive buffer size must be a non-zero multiple of 16; the
+        // register's low four bits are always zero. The IoBuffers types
+        // already assert this requirement at compile time, so this is
+        // just a double-check.
+        debug_assert!(rx_ring.mtu() != 0 && rx_ring.mtu() & 0xF == 0);
+ ral::write_reg!(ral::enet, enet, MRBR, R_BUF_SIZE: (rx_ring.mtu() >> 4) as u32);
+
+ // Descriptor rings are pre-configured when the user acquires the slices.
+ ral::write_reg!(ral::enet, enet, TDSR, tx_ring.as_ptr() as _);
+ ral::write_reg!(ral::enet, enet, RDSR, rx_ring.as_ptr() as _);
+
+        // MDC must not exceed 2.5MHz. MII_SPEED divides the source clock:
+        // MDC = source / ((MII_SPEED + 1) * 2), so round the divider up.
+        const SMI_MDC_FREQUENCY_HZ: u32 = 2_500_000;
+        let mii_speed =
+            (source_clock_hz + 2 * SMI_MDC_FREQUENCY_HZ - 1) / (2 * SMI_MDC_FREQUENCY_HZ) - 1;
+        // MDIO needs at least 10ns of hold time; HOLDTIME + 1 is the hold
+        // time in source-clock cycles, rounded up.
+        let hold_time =
+            (10 + 1_000_000_000 / source_clock_hz - 1) / (1_000_000_000 / source_clock_hz) - 1;
+ // TODO no way to enable / disable the MII management frame preamble. Maybe a new method
+ // for the user?
+ ral::modify_reg!(ral::enet, enet, MSCR, HOLDTIME: hold_time, MII_SPEED: mii_speed);
+
+ ral::modify_reg!(ral::enet, enet, RCR,
+ // Default max frame length without VLAN tags.
+ MAX_FL: 1518,
+            // Since we're providing half-duplex control to the user, we
+            // can't also enable loopback.
+ LOOP: 0,
+ // No need to snoop.
+ PROM: 0,
+ // Do not reject broadcast frames; we might be interested
+ // in these.
+ BC_REJ: 0,
+ // The MAC doesn't supply pause frames to the application.
+ PAUFWD: 0,
+            // Remove padding, along with the CRC, from received frames
+            // before they reach our software. Since padding removal implies
+            // CRC removal, the CRCFWD below has no effect.
+            PADEN: 1,
+ // Drop the CRC in received frames. This doesn't turn off
+ // CRC checking at the hardware level.
+ //
+ // If PADEN is set, this configuration does nothing.
+ CRCFWD: 1,
+ // Check the payload length based on the expected frame type /
+ // frame length (encoded in the frame).
+ NLC: 1,
+ // Enable flow control; react to pause frames by pausing the data
+ // transmit paths.
+ FCE: 1,
+ // MII or RMII mode; must be set.
+ MII_MODE: 1,
+ // Default to MII; users can enable RMII later.
+ RMII_MODE: 0,
+ // Default to 100Mbit/sec; users can throttle later.
+ RMII_10T: 0,
+ );
+
+ ral::modify_reg!(ral::enet, enet, TCR,
+            // smoltcp does not append frame CRCs in software, so let the
+            // hardware generate them.
+            CRCFWD: 0,
+            // smoltcp already includes the source MAC address in its
+            // frames, so don't insert it in hardware.
+            ADDINS: 0,
+ );
+
+ // Enable store-and-forward: start transmitting once you have a complete
+ // frame in the FIFO.
+ ral::modify_reg!(ral::enet, enet, TFWR, STRFWD: 1);
+ // Maintain store-and-forward on the receive path: use the receive queue
+ // as a buffer until an entire frame is received.
+ ral::write_reg!(ral::enet, enet, RSFL, 0);
+
+ // These accelerator options assume store-and-forward operations on both
+ // data paths. See above.
+ ral::modify_reg!(ral::enet, enet, RACC,
+ // Discard frames with MAC errors (checksumming, length, PHY errors).
+ LINEDIS: 1,
+ // Discard frames with the wrong checksums for the protocol and headers.
+ PRODIS: 1,
+ IPDIS: 1,
+ // Discard any padding within a short IP datagram.
+ PADREM: 1,
+ );
+ ral::modify_reg!(ral::enet, enet, TACC,
+ // Enable protocol checksums. Assumes that smoltcp sets these fields
+ // to zero on our behalf.
+ PROCHK: 1,
+ // Enable IP checksum injection into the IPv4 header. Assumes that smoltcp
+ // sets these fields to zero on our behalf.
+ IPCHK: 1,
+ );
+
+ // Commit the MAC address so we can match against it in the receive path.
+ ral::write_reg!(
+ ral::enet,
+ enet,
+ PALR,
+ (mac[0] as u32) << 24 | (mac[1] as u32) << 16 | (mac[2] as u32) << 8 | (mac[3] as u32)
+ );
+ ral::write_reg!(
+ ral::enet,
+ enet,
+ PAUR,
+ (mac[4] as u32) << 24 | (mac[5] as u32) << 16
+ );
+
+ Self {
+ enet,
+ tx_ring,
+ rx_ring,
+ }
+ }
+
+ /// Enable (`true`) or disable (`false`) the MAC.
+ ///
+ /// A disabled MAC cannot receive or send frames. By default, the MAC is disabled,
+ /// and you'll need to enable it once you've completed driver configuration.
+ #[inline]
+ pub fn enable_mac(&mut self, enable: bool) {
+ ral::modify_reg!(ral::enet, self.enet, ECR, ETHEREN: enable as u32);
+ if enable {
+ ral::write_reg!(ral::enet, self.enet, RDAR, RDAR: 1);
+ }
+ }
+
+ /// Indicates if the ENET MAC is (`true`) or is not (`false`) enabled.
+ #[inline]
+ pub fn is_mac_enabled(&self) -> bool {
+ ral::read_reg!(ral::enet, self.enet, ECR, ETHEREN == 1)
+ }
+
+ /// Enable (`true`) or disable (`false`) RMII mode.
+ ///
+ /// By default, the driver is in MII mode.
+ ///
+ /// # Panics
+ ///
+ /// Panics if called while the MAC is enabled.
+ // TODO(mciantyre) enums for MII modes, speeds, duplex?
+ #[inline]
+ pub fn enable_rmii_mode(&mut self, enable: bool) {
+ debug_assert!(!self.is_mac_enabled());
+ ral::modify_reg!(ral::enet, self.enet, RCR, RMII_MODE: enable as u32);
+ }
+
+ /// Throttle the receive pathway to 10Mbit/s.
+ ///
+    /// When enabled, the receive pathway operates at 10Mbit/s.
+    /// By default, or when disabled, the receive pathway operates at
+    /// 100Mbit/s.
+ ///
+ /// # Panics
+ ///
+ /// Panics if called while the MAC is enabled.
+ // TODO(mciantyre) enums for MII modes, speeds, duplex?
+ #[inline]
+ pub fn enable_10t_mode(&mut self, enable: bool) {
+ debug_assert!(!self.is_mac_enabled());
+ ral::modify_reg!(ral::enet, self.enet, RCR, RMII_10T: enable as u32);
+ }
+
+ /// Set the half-/full-duplex operation of the MAC.
+ ///
+ /// For more information, see the [`Duplex`] documentation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if called while the MAC is enabled.
+ #[inline]
+ pub fn set_duplex(&mut self, duplex: Duplex) {
+ debug_assert!(!self.is_mac_enabled());
+ match duplex {
+ Duplex::Full => {
+ ral::modify_reg!(ral::enet, self.enet, TCR, FDEN: 1);
+ ral::modify_reg!(ral::enet, self.enet, RCR, DRT: 0);
+ }
+ Duplex::Half => {
+ ral::modify_reg!(ral::enet, self.enet, TCR, FDEN: 0);
+ ral::modify_reg!(ral::enet, self.enet, RCR, DRT: 1);
+ }
+ }
+ }
+
+ /// Enable (`true`) or disable (`false`) management information database
+ /// (MIB) statistic indicators.
+ ///
+ /// When enabled, the hardware tracks various types of errors in the
+ /// MIB and remote network monitoring registers.
+ #[inline]
+ pub fn enable_mib(&mut self, enable: bool) {
+ ral::modify_reg!(ral::enet, self.enet, MIBC, MIB_DIS: !enable as u32);
+ }
+
+ /// Set to zero all management information database (MIB) statistic indicators.
+ #[inline]
+ pub fn clear_mib(&mut self) {
+ ral::modify_reg!(ral::enet, self.enet, MIBC, MIB_CLEAR: 1);
+ ral::modify_reg!(ral::enet, self.enet, MIBC, MIB_CLEAR: 0);
+ }
+}
+
+#[doc(hidden)]
+pub struct TxReady<'a> {
+ enet: &'a ral::enet::RegisterBlock,
+}
+
+impl TxReady<'_> {
+ fn consume(self) {
+ ral::write_reg!(ral::enet, self.enet, TDAR, TDAR: 1);
+ }
+}
+
+#[doc(hidden)]
+pub struct RxReady<'a> {
+ enet: &'a ral::enet::RegisterBlock,
+}
+
+impl RxReady<'_> {
+ fn consume(self) {
+ ral::write_reg!(ral::enet, self.enet, RDAR, RDAR: 1);
+ }
+}
+
+/// An error during an MII transfer.
+///
+/// TODO where are they?
+#[non_exhaustive]
+#[derive(Debug, defmt::Format)]
+pub enum MiiError {}
+
+impl<const N: u8> mdio::Read for Enet<N> {
+ type Error = MiiError;
+
+ #[inline]
+ fn read(&mut self, ctrl_bits: u16) -> Result<u16, Self::Error> {
+ // Place the control bits in to the high half-word of the register.
+ let mmfr = (ctrl_bits as u32) << 16;
+ ral::write_reg!(ral::enet, self.enet, MMFR, mmfr);
+
+ while ral::read_reg!(ral::enet, self.enet, EIR, MII == 0) {}
+ ral::write_reg!(ral::enet, self.enet, EIR, MII: 1);
+
+        // Reading the DATA field automatically discards the control bits.
+ Ok(ral::read_reg!(ral::enet, self.enet, MMFR, DATA) as u16)
+ }
+}
+
+impl<const N: u8> mdio::Write for Enet<N> {
+ type Error = MiiError;
+
+ #[inline]
+ fn write(&mut self, ctrl_bits: u16, data_bits: u16) -> Result<(), Self::Error> {
+        // Place the control bits into the high half-word of the register.
+ let mmfr = (ctrl_bits as u32) << 16 | data_bits as u32;
+ ral::write_reg!(ral::enet, self.enet, MMFR, mmfr);
+
+ while ral::read_reg!(ral::enet, self.enet, EIR, MII == 0) {}
+ ral::write_reg!(ral::enet, self.enet, EIR, MII: 1);
+
+ Ok(())
+ }
+}
+
+impl<const N: u8> smoltcp::phy::Device for Enet<N> {
+ type RxToken<'a> = bd::RxToken<'a>;
+ type TxToken<'a> = bd::TxToken<'a>;
+
+ fn receive(
+ &mut self,
+ _: smoltcp::time::Instant,
+ ) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
+ let tx = self.tx_ring.next_token(TxReady { enet: &self.enet })?;
+ let rx = self.rx_ring.next_token(RxReady { enet: &self.enet })?;
+ Some((rx, tx))
+ }
+
+ fn transmit(&mut self, _: smoltcp::time::Instant) -> Option<Self::TxToken<'_>> {
+ self.tx_ring.next_token(TxReady { enet: &self.enet })
+ }
+
+ fn capabilities(&self) -> smoltcp::phy::DeviceCapabilities {
+ let mtu = self.tx_ring.mtu().min(self.rx_ring.mtu());
+
+ let mut caps = smoltcp::phy::DeviceCapabilities::default();
+ caps.medium = smoltcp::phy::Medium::Ethernet;
+ caps.max_transmission_unit = mtu;
+ caps.max_burst_size = Some(mtu);
+
+ caps.checksum.ipv4 = smoltcp::phy::Checksum::None;
+ caps.checksum.udp = smoltcp::phy::Checksum::None;
+ caps.checksum.tcp = smoltcp::phy::Checksum::None;
+ caps.checksum.icmpv4 = smoltcp::phy::Checksum::None;
+
+ caps
+ }
+}
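
The tokens above plug directly into a smoltcp event loop. A rough sketch, assuming a smoltcp 0.10-style API; `iface`, `sockets`, and `now()` are placeholders the application provides:

    loop {
        let timestamp = now(); // board-specific monotonic clock
        // `Enet` is the `phy::Device`; polling drives frames through the
        // RX/TX tokens and rings the RDAR/TDAR doorbells on consume.
        iface.poll(timestamp, &mut enet, &mut sockets);
        // ...service sockets (e.g. the DHCP client), then idle until
        // `iface.poll_delay(timestamp, &sockets)` elapses...
    }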