//! Enhanced receive buffer descriptor layout and fields.

use core::sync::atomic::{AtomicU16, AtomicU32, Ordering};

#[repr(C)]
pub struct RxBD {
    /// Length of the received data, in bytes (offset 0x00).
    pub data_length: AtomicU16,
    /// Ownership and control flags such as [`FLAGS_EMPTY`] and [`FLAGS_WRAP`] (offset 0x02).
    pub flags: AtomicU16,
    /// Address of the receive data buffer (offset 0x04).
    pub data_buffer_pointer: AtomicU32,
    /// Status word (offset 0x08).
    pub status: AtomicU16,
    /// Control word (offset 0x0A).
    pub control: AtomicU16,
    /// Payload checksum (offset 0x0C).
    pub checksum: AtomicU16,
    /// Header information (offset 0x0E).
    pub header: AtomicU16,
    _reserved0: [u16; 1],
    /// Buffer descriptor update (BDU) word (offset 0x12).
    pub last_bdu: AtomicU16,
    /// IEEE 1588 receive timestamp (offset 0x14).
    pub timestamp_1588: AtomicU32,
    _reserved1: [u16; 4],
}

/// The descriptor is empty: it is owned by the peripheral and holds no received frame yet.
pub const FLAGS_EMPTY: u16 = 1 << 15;
/// Marks the last descriptor in the ring; the peripheral wraps back to the first descriptor after this one.
pub const FLAGS_WRAP: u16 = 1 << 13;

impl RxBD {
    /// Returns `true` while the descriptor's EMPTY flag is set, i.e. no frame has been written yet.
    pub(crate) fn is_empty(&self) -> bool {
        self.flags.load(Ordering::SeqCst) & FLAGS_EMPTY != 0
    }
}
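
// Illustrative sketch (an assumed helper, not part of the original module API):
// one way a receive path might consume a descriptor. The peripheral typically
// clears the EMPTY flag once it has written a frame, so the length is only
// meaningful when `is_empty` returns `false`.
#[allow(dead_code)]
pub(crate) fn frame_length_if_ready(bd: &RxBD) -> Option<u16> {
    if bd.is_empty() {
        // Still owned by the peripheral; no frame to read yet.
        None
    } else {
        Some(bd.data_length.load(Ordering::SeqCst))
    }
}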

#[cfg(test)]
mod tests {
    use core::ptr::addr_of;
    use super::RxBD;

    fn zeroed() -> RxBD {
        // Safety: an all-zero bit pattern is a valid value for every field
        // (atomic integers and plain `u16` arrays).
        unsafe { core::mem::MaybeUninit::zeroed().assume_init() }
    }

    #[test]
    fn field_offsets() {
        let rxbd = zeroed();
        let start = &rxbd as *const _ as *const u8;
        assert_eq!(unsafe { start.add(0x0) }, addr_of!(rxbd.data_length).cast());
        assert_eq!(unsafe { start.add(0x2) }, addr_of!(rxbd.flags).cast());
        assert_eq!(
            unsafe { start.add(0x4) },
            addr_of!(rxbd.data_buffer_pointer).cast()
        );
        assert_eq!(unsafe { start.add(0x8) }, addr_of!(rxbd.status).cast());
        assert_eq!(unsafe { start.add(0xA) }, addr_of!(rxbd.control).cast());
        assert_eq!(unsafe { start.add(0xC) }, addr_of!(rxbd.checksum).cast());
        assert_eq!(unsafe { start.add(0xE) }, addr_of!(rxbd.header).cast());
        assert_eq!(unsafe { start.add(0x12) }, addr_of!(rxbd.last_bdu).cast());
        assert_eq!(
            unsafe { start.add(0x14) },
            addr_of!(rxbd.timestamp_1588).cast()
        );
    }
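
    // Sanity check sketched from the layout above: with `#[repr(C)]` the fields
    // pack into a 32-byte, 4-byte-aligned descriptor. Whether the hardware
    // expects exactly this size is an assumption; the assertion only pins down
    // what this Rust definition currently produces.
    #[test]
    fn struct_size_and_alignment() {
        assert_eq!(core::mem::size_of::<RxBD>(), 32);
        assert_eq!(core::mem::align_of::<RxBD>(), 4);
    }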
}