Skip to content

Commit 9910f3b

Browse files
committed
TMK test for GIC
1 parent 19e0e7a commit 9910f3b

File tree

8 files changed

+1590
-47
lines changed

8 files changed

+1590
-47
lines changed

Cargo.lock

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6454,6 +6454,8 @@ checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
64546454
name = "simple_tmk"
64556455
version = "0.0.0"
64566456
dependencies = [
6457+
"aarch64defs",
6458+
"bitfield-struct 0.10.1",
64576459
"minimal_rt_build",
64586460
"tmk_core",
64596461
"tmk_macros",

tmk/simple_tmk/Cargo.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ rust-version.workspace = true
77
edition.workspace = true
88

99
[dependencies]
10+
aarch64defs.workspace = true
11+
bitfield-struct.workspace = true
1012
tmk_core.workspace = true
1113
tmk_macros.workspace = true
1214
x86defs.workspace = true
Lines changed: 271 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,271 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
//! Abstractions for memory-mapped device register(s) access.
5+
//!
6+
//! These lack an RMW implementation.
7+
8+
use core::marker::PhantomData;
9+
use core::ops::Range;
10+
use core::sync::atomic::AtomicU32;
11+
use core::sync::atomic::AtomicU64;
12+
use core::sync::atomic::Ordering;
13+
14+
/// Trait to describe atomic access to a memory location holding a `T`.
pub trait AtomicAccess<T: Copy> {
    /// Loads the value from the address with the specified ordering.
    ///
    /// # Safety
    /// The address must be valid and properly aligned for an atomic `T`.
    unsafe fn load(ptr: *mut T, order: Ordering) -> T;

    /// Stores the value at the address with the specified ordering.
    ///
    /// # Safety
    /// The address must be valid and properly aligned for an atomic `T`.
    unsafe fn store(ptr: *mut T, v: T, order: Ordering);

    /// Bitwise "or" with the current value.
    ///
    /// Performs a bitwise "or" operation on the current value and the argument `v`,
    /// sets the new value to the result, and returns the previous value.
    ///
    /// # Safety
    /// The address must be valid and properly aligned for an atomic `T`.
    unsafe fn fetch_or(ptr: *mut T, v: T, order: Ordering) -> T;

    /// Bitwise "and" with the current value.
    ///
    /// Performs a bitwise "and" operation on the current value and the argument `v`,
    /// sets the new value to the result, and returns the previous value.
    ///
    /// # Safety
    /// The address must be valid and properly aligned for an atomic `T`.
    unsafe fn fetch_and(ptr: *mut T, v: T, order: Ordering) -> T;
}
46+
47+
impl AtomicAccess<u64> for u64 {
48+
/// Loads the data from the address with the spcifies ordering.
49+
///
50+
/// # Safety
51+
/// The address is valid.
52+
unsafe fn load(ptr: *mut u64, order: Ordering) -> u64 {
53+
// SAFETY: atomic access, the address is valid.
54+
unsafe { AtomicU64::from_ptr(ptr).load(order) }
55+
}
56+
57+
/// Stores the data at the address with the spcifies ordering.
58+
///
59+
/// # Safety
60+
/// The address is valid.
61+
unsafe fn store(ptr: *mut u64, v: u64, order: Ordering) {
62+
// SAFETY: atomic access, the address is valid.
63+
unsafe { AtomicU64::from_ptr(ptr).store(v, order) };
64+
}
65+
66+
/// Bitwise "or" with the current value.
67+
///
68+
/// Performs a bitwise "or" operation on the current value and the argument `v`, and
69+
/// sets the new value to the result.
70+
///
71+
/// # Safety
72+
/// The address is valid.
73+
unsafe fn fetch_or(ptr: *mut u64, v: u64, order: Ordering) -> u64 {
74+
// SAFETY: atomic access, the address is valid.
75+
unsafe { AtomicU64::from_ptr(ptr).fetch_or(v, order) }
76+
}
77+
78+
/// Bitwise "and" with the current value.
79+
///
80+
/// Performs a bitwise "and" operation on the current value and the argument `v`, and
81+
/// sets the new value to the result.
82+
///
83+
/// # Safety
84+
/// The address is valid.
85+
unsafe fn fetch_and(ptr: *mut u64, v: u64, order: Ordering) -> u64 {
86+
// SAFETY: atomic access, the address is valid.
87+
unsafe { AtomicU64::from_ptr(ptr).fetch_and(v, order) }
88+
}
89+
}
90+
91+
impl AtomicAccess<u32> for u32 {
92+
/// Loads the data from the address with the spcifies ordering.
93+
///
94+
/// # Safety
95+
/// The address is valid.
96+
unsafe fn load(ptr: *mut u32, order: Ordering) -> u32 {
97+
// SAFETY: atomic access, the address is valid.
98+
99+
unsafe { AtomicU32::from_ptr(ptr).load(order) }
100+
}
101+
102+
/// Stores the data at the address with the spcifies ordering.
103+
///
104+
/// # Safety
105+
/// The address is valid.
106+
unsafe fn store(ptr: *mut u32, v: u32, order: Ordering) {
107+
// SAFETY: atomic access, the address is valid.
108+
unsafe { AtomicU32::from_ptr(ptr).store(v, order) };
109+
}
110+
111+
/// Bitwise "or" with the current value.
112+
///
113+
/// Performs a bitwise "or" operation on the current value and the argument `v`, and
114+
/// sets the new value to the result.
115+
///
116+
/// # Safety
117+
/// The address is valid.
118+
unsafe fn fetch_or(ptr: *mut u32, v: u32, order: Ordering) -> u32 {
119+
// SAFETY: atomic access, the address is valid.
120+
unsafe { AtomicU32::from_ptr(ptr).fetch_or(v, order) }
121+
}
122+
123+
/// Bitwise "and" with the current value.
124+
///
125+
/// Performs a bitwise "and" operation on the current value and the argument `v`, and
126+
/// sets the new value to the result.
127+
///
128+
/// # Safety
129+
/// The address is valid.
130+
unsafe fn fetch_and(ptr: *mut u32, v: u32, order: Ordering) -> u32 {
131+
// SAFETY: atomic access, the address is valid.
132+
unsafe { AtomicU32::from_ptr(ptr).fetch_and(v, order) }
133+
}
134+
}
135+
136+
/// Trait to describe the register access.
137+
pub trait DeviceRegisterSpec {
138+
/// The raw type used for memory representation.
139+
type Raw: Copy + From<Self::Value> + AtomicAccess<Self::Raw>;
140+
/// The value type used in the API.
141+
type Value: Copy + From<Self::Raw>;
142+
/// The register offset from the base address.
143+
const OFFSET: usize;
144+
/// Mmeory ordering when loading, deafults to the
145+
/// sequential consistency.
146+
const ORDERING_LOAD: Ordering = Ordering::SeqCst;
147+
/// Mmeory ordering when loading, deafults to the
148+
/// sequential consistency.
149+
const ORDERING_STORE: Ordering = Ordering::SeqCst;
150+
}
151+
152+
/// A memory-mapped device register.
pub struct DeviceRegister<S: DeviceRegisterSpec> {
    // Pre-computed register address: base address + S::OFFSET.
    address: *mut S::Raw,
    // Ties the register to its spec without storing any data.
    _spec: PhantomData<S>,
}
157+
158+
impl<S: DeviceRegisterSpec> DeviceRegister<S> {
159+
/// Create a new MMIO register from a base address.
160+
///
161+
/// Caller must ensure:
162+
/// * the base address is valid and properly aligned,
163+
/// * the resulting address (base + OFFSET) points to valid memory,
164+
/// * the memory has the required access permissions, caching and
165+
/// attributes set.
166+
pub const fn new(base_address: usize) -> Self {
167+
Self {
168+
address: (base_address + S::OFFSET) as *mut S::Raw,
169+
_spec: PhantomData,
170+
}
171+
}
172+
173+
/// Read the register value. Might be reorderd by the CPU,
174+
/// no compiler reordering.
175+
pub fn read(&self) -> S::Value {
176+
// SAFETY: volatile access ensures proper hardware interaction: no
177+
// accesses will be elided or reordered by the compiler, and the
178+
// address comes from a trusted place.
179+
unsafe { core::ptr::read_volatile(self.address).into() }
180+
}
181+
182+
/// Write a value to the register. Might be reorderd by the CPU,
183+
/// no compiler reordering.
184+
pub fn write(&mut self, value: S::Value) {
185+
// SAFETY: volatile access ensures proper hardware interaction: no
186+
// accesses will be elided or reordered by the compiler, and the
187+
// address comes from a trusted place.
188+
unsafe { core::ptr::write_volatile(self.address, value.into()) };
189+
}
190+
191+
/// Atomically load the register value using memory ordering
192+
/// from the specification.
193+
pub fn load(&self) -> S::Value {
194+
// SAFETY: atomic access provides a correct way to interact with the
195+
// hardware, and the address comes from the trusted source.
196+
unsafe { S::Raw::load(self.address, S::ORDERING_LOAD).into() }
197+
}
198+
199+
/// Atoically store a value to the register using memory ordering
200+
/// from the specification.
201+
pub fn store(&mut self, value: S::Value) {
202+
// SAFETY: atomic access provides a correct way to interact with the
203+
// hardware, and the address comes from the trusted source.
204+
unsafe {
205+
S::Raw::store(self.address, value.into(), S::ORDERING_STORE);
206+
}
207+
}
208+
209+
/// Atomically bitise "or" load the register value using memory ordering
210+
/// from the specification, and return the old value.
211+
pub fn fetch_or(&mut self, value: S::Value) -> S::Value {
212+
// SAFETY: atomic access provides a correct way to interact with the
213+
// hardware, and the address comes from the trusted source.
214+
unsafe { S::Raw::fetch_or(self.address, value.into(), S::ORDERING_LOAD).into() }
215+
}
216+
217+
/// Atomically bitise "and" load the register value using memory ordering
218+
/// from the specification, and return the old value.
219+
pub fn fetch_and(&mut self, value: S::Value) -> S::Value {
220+
// SAFETY: atomic access provides a correct way to interact with the
221+
// hardware, and the address comes from the trusted source.
222+
unsafe { S::Raw::fetch_and(self.address, value.into(), S::ORDERING_LOAD).into() }
223+
}
224+
}
225+
226+
/// Trait defining the specification for an array of device registers
227+
pub trait DeviceRegisterArraySpec: DeviceRegisterSpec {
228+
/// The stride between consecutive registers in bytes
229+
const STRIDE: usize = 0;
230+
/// The number of registers in the array
231+
const COUNT: usize;
232+
}
233+
234+
/// An array of memory-mapped device registers.
pub struct DeviceRegisterArray<S: DeviceRegisterArraySpec> {
    // Base address of the first register in the array.
    base_address: usize,
    // Ties the array to its spec without storing any data.
    _spec: PhantomData<S>,
}
239+
240+
impl<S: DeviceRegisterArraySpec> DeviceRegisterArray<S> {
241+
/// Create a new array of MMIO registers from a base address.
242+
///
243+
/// The user must ensure that the base address and the offset are valid,
244+
/// and that the memory is mapped as required for the device access.
245+
pub const fn new(base_address: usize) -> Self {
246+
Self {
247+
base_address,
248+
_spec: PhantomData,
249+
}
250+
}
251+
252+
/// Get a reference to a specific register in the array.
253+
pub fn index(&self, index: usize) -> DeviceRegister<S> {
254+
assert!(index < S::COUNT, "Register index out of bounds");
255+
256+
DeviceRegister::<S>::new(self.base_address + index * (S::STRIDE + size_of::<S::Raw>()))
257+
}
258+
259+
/// Iterate over all registers in the array.
260+
pub fn iter(&self) -> impl Iterator<Item = DeviceRegister<S>> + '_ {
261+
(0..S::COUNT).map(move |i| self.index(i))
262+
}
263+
264+
/// Fill the range with some value.
265+
pub fn fill(&mut self, range: Range<usize>, value: S::Value) {
266+
self.iter()
267+
.skip(range.start)
268+
.take(range.len())
269+
.for_each(|mut r| r.store(value));
270+
}
271+
}

0 commit comments

Comments
 (0)