Skip to content

Commit 65c77e8

Browse files
committed
zephyr: device: uart: irq: Replace write with async API
Update the writing to be an async interface. Instead of a blocking write, there is a `write_enqueue` to add the message, and a `write_wait` which is a blocking wait for a write to handle. For this to work, the UartIrq needs to own the buffer for the duration of the write. At this point, the buffers are just `Vec<u8>`, but a future change will make this more general, so that, for example, Pooled buffers can be used. The buffer should never be dropped within this code. Signed-off-by: David Brown <[email protected]>
1 parent 673c5c5 commit 65c77e8

File tree

1 file changed

+119
-74
lines changed

1 file changed

+119
-74
lines changed

zephyr/src/device/uart/irq.rs

Lines changed: 119 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,12 @@
44
#![allow(dead_code)]
55
#![allow(unused_variables)]
66

7+
extern crate alloc;
8+
9+
// TODO: This should be a generic Buffer type indicating some type of owned buffer, with Vec as one
10+
// possible implementation.
11+
use alloc::vec::Vec;
12+
713
use arraydeque::ArrayDeque;
814

915
use crate::raw;
@@ -13,6 +19,8 @@ use crate::sync::{Arc, SpinMutex};
1319
use crate::time::{NoWait, Timeout};
1420

1521
use core::ffi::c_void;
22+
use core::ops::Range;
23+
use core::{fmt, result};
1624

1725
use super::Uart;
1826

@@ -23,20 +31,38 @@ const BUFFER_SIZE: usize = 256;
2331

2432
/// The "outer" struct holds the semaphore, and the mutex. The semaphore has to live outside of the
2533
/// mutex because it can only be waited on when the Mutex is not locked.
26-
struct IrqOuterData {
34+
struct IrqOuterData<const WS: usize, const RS: usize> {
2735
read_sem: Semaphore,
36+
/// Write semaphore. This should **exactly** match the number of elements in `write_dones`.
2837
write_sem: Semaphore,
29-
inner: SpinMutex<IrqInnerData>,
38+
inner: SpinMutex<IrqInnerData<WS, RS>>,
3039
}
3140

3241
/// Data for communication with the UART IRQ.
33-
struct IrqInnerData {
42+
struct IrqInnerData<const WS: usize, const RS: usize> {
3443
/// The Ring buffer holding incoming and read data.
3544
buffer: ArrayDeque<u8, BUFFER_SIZE>,
36-
/// Data to be written, if that is the case.
37-
///
38-
/// If this is Some, then the irq should be enabled.
39-
write: Option<WriteSlice>,
45+
/// Write request. The 'head' is the one being worked on. Once completed, they will move into
46+
/// the completion queue.
47+
write_requests: ArrayDeque<WriteRequest, WS>,
48+
/// Completed writes.
49+
write_dones: ArrayDeque<WriteDone, WS>,
50+
}
51+
52+
/// A single requested write. This is a managed buffer, and a range of the buffer to actually
53+
/// write. The write is completed when `pos` == `len`.
54+
struct WriteRequest {
55+
/// The data to write.
56+
data: Vec<u8>,
57+
/// What part to write.
58+
part: Range<usize>,
59+
}
60+
61+
/// A completed write. All the requested data will have been written, and this returns the buffer
62+
/// to the user.
63+
struct WriteDone {
64+
/// The returned buffer.
65+
data: Vec<u8>,
4066
}
4167

4268
/// Represents a slice of data that the irq is going to write.
@@ -56,6 +82,19 @@ impl WriteSlice {
5682
}
5783
}
5884

85+
/// The error type from write requests. Used to return the buffer.
86+
pub struct WriteError(pub Vec<u8>);
87+
88+
// The default Debug for `WriteError` would print the whole buffer, which isn't particularly useful.
89+
impl fmt::Debug for WriteError {
90+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
91+
write!(f, "WriteError(...)")
92+
}
93+
}
94+
95+
/// The wait for write completion timed out.
96+
pub struct WriteWaitTimedOut;
97+
5998
/// An interface to the UART, that uses the "legacy" IRQ API.
6099
///
61100
/// The interface is parameterized by two values: `WS` is the number of elements in the write ring,
@@ -67,7 +106,7 @@ pub struct UartIrq<const WS: usize, const RS: usize> {
67106
/// Interior wrapped device, to be able to hand out lifetime managed references to it.
68107
uart: Uart,
69108
/// Critical section protected data.
70-
data: Arc<IrqOuterData>,
109+
data: Arc<IrqOuterData<WS, RS>>,
71110
}
72111

73112
// UartIrq is also Send, !Sync, for the same reasons as for Uart.
@@ -77,11 +116,12 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
77116
/// Convert uart into irq driven one.
78117
pub unsafe fn new(uart: Uart) -> Result<Self> {
79118
let data = Arc::new(IrqOuterData {
80-
read_sem: Semaphore::new(0, 1)?,
81-
write_sem: Semaphore::new(0, 1)?,
119+
read_sem: Semaphore::new(0, RS as u32)?,
120+
write_sem: Semaphore::new(0, WS as u32)?,
82121
inner: SpinMutex::new(IrqInnerData {
83122
buffer: ArrayDeque::new(),
84-
write: None,
123+
write_requests: ArrayDeque::new(),
124+
write_dones: ArrayDeque::new(),
85125
}),
86126
});
87127

@@ -91,7 +131,7 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
91131
let data_raw = data_raw as *mut c_void;
92132

93133
let ret = unsafe {
94-
raw::uart_irq_callback_user_data_set(uart.device, Some(irq_callback), data_raw)
134+
raw::uart_irq_callback_user_data_set(uart.device, Some(irq_callback::<WS, RS>), data_raw)
95135
};
96136
to_result_void(ret)?;
97137
// Should this be settable?
@@ -133,57 +173,54 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
133173
self.data.try_read(buf)
134174
}
135175

136-
/// A blocking write to the UART.
176+
/// Enqueue a single write request.
137177
///
138-
/// By making this blocking, we don't need to make an extra copy of the data.
139-
///
140-
/// TODO: Async write.
141-
pub unsafe fn write<T>(&mut self, buf: &[u8], timeout: T) -> usize
142-
where T: Into<Timeout>
143-
{
144-
if buf.len() == 0 {
145-
return 0;
178+
/// If the queue is full, the `WriteError` returned will return the buffer.
179+
pub fn write_enqueue(&mut self, data: Vec<u8>, part: Range<usize>) -> result::Result<(), WriteError> {
180+
let mut inner = self.data.inner.lock().unwrap();
181+
182+
let req = WriteRequest { data, part };
183+
match inner.write_requests.push_back(req) {
184+
Ok(()) => {
185+
// Make sure the write actually happens. This needs to happen for the first message
186+
// queued, if some were already queued, it should already be enabled.
187+
if inner.write_requests.len() == 1 {
188+
unsafe { raw::uart_irq_tx_enable(self.uart.device); }
189+
}
190+
Ok(())
191+
}
192+
Err(e) => Err(WriteError(e.element.data)),
146193
}
194+
}
147195

148-
// Make the data to be written available to the irq handler, and get it going.
149-
{
150-
let mut inner = self.data.inner.lock().unwrap();
151-
assert!(inner.write.is_none());
196+
/// Return true if the write queue is full.
197+
///
198+
/// There is a race between this and write_enqueue, but only in the safe direction (this may
199+
/// return true, but a write may complete before write_enqueue is called, in which case the enqueue still succeeds).
200+
pub fn write_is_full(&self) -> bool {
201+
let inner = self.data.inner.lock().unwrap();
152202

153-
inner.write = Some(WriteSlice {
154-
data: buf.as_ptr(),
155-
len: buf.len(),
156-
});
203+
inner.write_requests.is_full()
204+
}
157205

158-
unsafe { raw::uart_irq_tx_enable(self.uart.device) };
206+
/// Retrieve a write completion.
207+
///
208+
/// Waits up to `timeout` for a write to complete, and returns the buffer.
209+
pub fn write_wait<T>(&mut self, timeout: T) -> result::Result<Vec<u8>, WriteWaitTimedOut>
210+
where T: Into<Timeout>,
211+
{
212+
match self.data.write_sem.take(timeout) {
213+
Ok(()) => (),
214+
// TODO: Handle other errors?
215+
Err(_) => return Err(WriteWaitTimedOut),
159216
}
160217

161-
// Wait for the transmission to complete. This shouldn't be racy, as the irq shouldn't be
162-
// giving the semaphore until there is 'write' data, and it has been consumed.
163-
let _ = self.data.write_sem.take(timeout);
164-
165-
// Depending on the driver, there might be a race here. This would result in the above
166-
// 'take' returning early, and no actual data being written.
167-
168-
{
169-
let mut inner = self.data.inner.lock().unwrap();
170-
171-
if let Some(write) = inner.write.take() {
172-
// First, make sure that no more interrupts will come in.
173-
unsafe { raw::uart_irq_tx_disable(self.uart.device) };
174-
175-
// The write did not complete, and this represents remaining data to write.
176-
buf.len() - write.len
177-
} else {
178-
// The write completed, the rx irq should be disabled. Just return the whole
179-
// buffer.
180-
buf.len()
181-
}
182-
}
218+
let mut inner = self.data.inner.lock().unwrap();
219+
Ok(inner.write_dones.pop_front().expect("Write done empty, despite semaphore").data)
183220
}
184221
}
185222

186-
impl IrqOuterData {
223+
impl<const WS: usize, const RS: usize> IrqOuterData<WS, RS> {
187224
/// Try reading from the inner data, filling the buffer with as much data as makes sense.
188225
/// Returns the number of bytes actually read, or Zero if none.
189226
fn try_read(&self, buf: &mut [u8]) -> usize {
@@ -206,12 +243,12 @@ impl IrqOuterData {
206243
}
207244
}
208245

209-
extern "C" fn irq_callback(
246+
extern "C" fn irq_callback<const WS: usize, const RS: usize>(
210247
dev: *const raw::device,
211248
user_data: *mut c_void,
212249
) {
213250
// Convert our user data, back to the CS Mutex.
214-
let outer = unsafe { &*(user_data as *const IrqOuterData) };
251+
let outer = unsafe { &*(user_data as *const IrqOuterData<WS, RS>) };
215252
let mut inner = outer.inner.lock().unwrap();
216253

217254
// TODO: Make this more efficient.
@@ -234,26 +271,34 @@ extern "C" fn irq_callback(
234271
outer.read_sem.give();
235272
}
236273

237-
// If there is data to write, ensure the fifo is full, and when we run out of data, disable the
238-
// interrupt and signal the waiting thread.
239-
if let Some(write) = inner.write.take() {
240-
let count = unsafe {
241-
raw::uart_fifo_fill(dev, write.data, write.len as i32)
242-
};
243-
if count < 0 {
244-
panic!("Incorrect use of device fifo");
245-
}
246-
let count = count as usize;
247-
248-
if count == write.len {
249-
// The write finished, leave 'write' empty, and let the thread know we're done.
250-
outer.write_sem.give();
251-
252-
// Disable the tx fifo, as we don't need it any more.
253-
unsafe { raw::uart_irq_tx_disable(dev) };
274+
// Handle any write requests.
275+
loop {
276+
if let Some(mut req) = inner.write_requests.pop_front() {
277+
if req.part.is_empty() {
278+
// This request is empty. Move to completion.
279+
inner.write_dones.push_back(WriteDone { data: req.data })
280+
.expect("write done queue is full");
281+
outer.write_sem.give();
282+
} else {
283+
// Try to write this part of the data.
284+
let piece = &req.data[req.part.clone()];
285+
let count = unsafe {
286+
raw::uart_fifo_fill(dev, piece.as_ptr(), piece.len() as i32)
287+
};
288+
if count < 0 {
289+
panic!("Incorrect use of device fifo");
290+
}
291+
let count = count as usize;
292+
293+
// Adjust the part. The next through the loop will notice the write being done.
294+
req.part.start += count;
295+
inner.write_requests.push_front(req)
296+
.expect("Unexpected write_dones overflow");
297+
}
254298
} else {
255-
// We're not finished, so remember how much is left.
256-
inner.write = Some(unsafe { write.add(count) });
299+
// No work. Turn off the irq, and stop.
300+
unsafe { raw::uart_irq_tx_disable(dev); }
301+
break;
257302
}
258303
}
259304

0 commit comments

Comments
 (0)