litex/uart.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! LiteX UART core
//!
//! Hardware source and documentation available at
//! [`litex/soc/cores/uart.py`](https://github.com/enjoy-digital/litex/blob/master/litex/soc/cores/uart.py).
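//!
//! ## Usage
//!
//! A board integration might instantiate and register this driver roughly as
//! in the sketch below. The CSR base addresses, the `SoCRegisterFmt` register
//! configuration type and the 100 MHz system clock are illustrative
//! placeholders for whatever the particular SoC provides, not values defined
//! in this file:
//!
//! ```rust,ignore
//! use kernel::deferred_call::DeferredCallClient;
//! use kernel::utilities::StaticRef;
//! use litex::uart::{LiteXUart, LiteXUartPhyRegisters, LiteXUartRegisters};
//!
//! const UART_BASE: StaticRef<LiteXUartRegisters<SoCRegisterFmt>> =
//!     unsafe { StaticRef::new(0xf000_2000 as *const LiteXUartRegisters<SoCRegisterFmt>) };
//! const UART_PHY_BASE: StaticRef<LiteXUartPhyRegisters<SoCRegisterFmt>> =
//!     unsafe { StaticRef::new(0xf000_2800 as *const LiteXUartPhyRegisters<SoCRegisterFmt>) };
//!
//! // Allocate the driver, passing the PHY registers and the system clock
//! // frequency (in Hz) used for baudrate configuration:
//! let uart = kernel::static_init!(
//!     LiteXUart<'static, SoCRegisterFmt>,
//!     LiteXUart::new(UART_BASE, Some((UART_PHY_BASE, 100_000_000)))
//! );
//!
//! // Register the deferred call and disable all UART events before use:
//! uart.register();
//! uart.initialize();
//! ```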

use core::cell::Cell;
use kernel::deferred_call::{DeferredCall, DeferredCallClient};
use kernel::hil::uart;
use kernel::utilities::cells::{OptionalCell, TakeCell};
use kernel::utilities::StaticRef;
use kernel::ErrorCode;

use crate::event_manager::LiteXEventManager;
use crate::litex_registers::{
    register_bitfields, LiteXSoCRegisterConfiguration, Read, ReadRegWrapper, Write, WriteRegWrapper,
};

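// Indices of the transmit and receive events within the UART core's LiteX
// event manager: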
const EVENT_MANAGER_INDEX_TX: usize = 0;
const EVENT_MANAGER_INDEX_RX: usize = 1;

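/// Shorthand for the [`LiteXEventManager`] instantiation used by the UART
/// core, operating on its 8-bit wide event status, pending and enable CSRs.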
type LiteXUartEV<'a, R> = LiteXEventManager<
    'a,
    u8,
    <R as LiteXSoCRegisterConfiguration>::ReadOnly8,
    <R as LiteXSoCRegisterConfiguration>::ReadWrite8,
    <R as LiteXSoCRegisterConfiguration>::ReadWrite8,
>;

/// LiteX UART PHY registers
///
/// This is a separate register set, as it is not necessarily present
/// on every LiteX SoC with a UART (e.g. in a Verilated simulation).
#[repr(C)]
pub struct LiteXUartPhyRegisters<R: LiteXSoCRegisterConfiguration> {
    /// Tuning word (UART baudrate)
    tuning_word: R::ReadWrite32,
}

/// LiteX UART registers
#[repr(C)]
pub struct LiteXUartRegisters<R: LiteXSoCRegisterConfiguration> {
    /// receive & transmit register
    rxtx: R::ReadWrite8,
    /// transmit buffer full
    txfull: R::ReadOnly8,
    /// receive buffer empty
    rxempty: R::ReadOnly8,
    /// LiteX EventManager status register
    ev_status: R::ReadOnly8,
    /// LiteX EventManager pending register
    ev_pending: R::ReadWrite8,
    /// LiteX EventManager enable register
    ev_enable: R::ReadWrite8,
    /// transmit buffer empty
    txempty: R::ReadOnly8,
    /// receive buffer full
    rxfull: R::ReadOnly8,
}


impl<R: LiteXSoCRegisterConfiguration> LiteXUartRegisters<R> {
    /// Create an event manager instance for the UART events
    fn ev(&self) -> LiteXUartEV<'_, R> {
        LiteXUartEV::<R>::new(&self.ev_status, &self.ev_pending, &self.ev_enable)
    }
}

register_bitfields![u8,
    rxtx [
        data OFFSET(0) NUMBITS(8) []
    ],
    txfull [
        full OFFSET(0) NUMBITS(1) []
    ],
    rxempty [
        empty OFFSET(0) NUMBITS(1) []
    ],
    txempty [
        empty OFFSET(0) NUMBITS(1) []
    ],
    rxfull [
        full OFFSET(0) NUMBITS(1) []
    ]
];

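/// LiteX UART driver
///
/// Implements the Tock [`uart::Configure`], [`uart::Transmit`] and
/// [`uart::Receive`] HILs on top of a LiteX UART core and its optional
/// baudrate PHY, using deferred calls to complete operations which will not
/// generate an interrupt.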
pub struct LiteXUart<'a, R: LiteXSoCRegisterConfiguration> {
    uart_regs: StaticRef<LiteXUartRegisters<R>>,
    phy: Option<(StaticRef<LiteXUartPhyRegisters<R>>, u32)>,
    tx_client: OptionalCell<&'a dyn uart::TransmitClient>,
    rx_client: OptionalCell<&'a dyn uart::ReceiveClient>,
    tx_buffer: TakeCell<'static, [u8]>,
    tx_len: Cell<usize>,
    tx_progress: Cell<usize>,
    tx_aborted: Cell<bool>,
    tx_deferred_call: Cell<bool>,
    rx_buffer: TakeCell<'static, [u8]>,
    rx_len: Cell<usize>,
    rx_progress: Cell<usize>,
    rx_aborted: Cell<bool>,
    rx_deferred_call: Cell<bool>,
    deferred_call: DeferredCall,
    initialized: Cell<bool>,
}

impl<'a, R: LiteXSoCRegisterConfiguration> LiteXUart<'a, R> {
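    /// Construct a new LiteX UART driver instance.
    ///
    /// `phy_args` optionally provides the UART PHY register block together
    /// with the SoC system clock frequency, both of which are required for
    /// baudrate configuration through [`uart::Configure`].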
    pub fn new(
        uart_base: StaticRef<LiteXUartRegisters<R>>,
        phy_args: Option<(StaticRef<LiteXUartPhyRegisters<R>>, u32)>,
    ) -> LiteXUart<'a, R> {
        LiteXUart {
            uart_regs: uart_base,
            phy: phy_args,
            tx_client: OptionalCell::empty(),
            rx_client: OptionalCell::empty(),
            tx_buffer: TakeCell::empty(),
            tx_len: Cell::new(0),
            tx_progress: Cell::new(0),
            tx_aborted: Cell::new(false),
            tx_deferred_call: Cell::new(false),
            rx_buffer: TakeCell::empty(),
            rx_len: Cell::new(0),
            rx_progress: Cell::new(0),
            rx_aborted: Cell::new(false),
            rx_deferred_call: Cell::new(false),
            deferred_call: DeferredCall::new(),
            initialized: Cell::new(false),
        }
    }

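    /// Initialize the UART by disabling all of its events.
    ///
    /// This must be called before using the UART HIL methods, which assert
    /// that the peripheral has been initialized.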
    pub fn initialize(&self) {
        self.uart_regs.ev().disable_all();
        self.initialized.set(true);
    }

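    /// Transmit a buffer synchronously, busy-waiting until every byte has
    /// been written to the device.
    ///
    /// The TX event enable and pending state is preserved, such that an
    /// in-progress asynchronous transmission is not disturbed.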
    pub fn transmit_sync(&self, bytes: &[u8]) {
        // We need to make sure that we're not modifying the interrupt
        // pending and enable bits here!
        let regs = self.uart_regs;
        let ev = regs.ev();

        // Store whether an interrupt was pending and whether interrupts
        // were enabled before; if we cause an additional event, clear it
        // after waiting until the buffer has space again.
        let interrupt_pending = ev.event_pending(EVENT_MANAGER_INDEX_TX);
        let interrupt_enabled = ev.event_enabled(EVENT_MANAGER_INDEX_TX);
        ev.disable_event(EVENT_MANAGER_INDEX_TX);

        for b in bytes.iter() {
            while ReadRegWrapper::wrap(&regs.txfull).is_set(txfull::full) {}
            WriteRegWrapper::wrap(&regs.rxtx).write(rxtx::data.val(*b));
        }

        // Wait until there is space for at least one byte
        while ReadRegWrapper::wrap(&regs.txfull).is_set(txfull::full) {}

        // Check if we generated an additional event and clear it
        if !interrupt_pending && ev.event_pending(EVENT_MANAGER_INDEX_TX) {
            ev.clear_event(EVENT_MANAGER_INDEX_TX);
        }

        // Check if interrupts were previously enabled and re-enable them
        // in that case
        if interrupt_enabled {
            ev.enable_event(EVENT_MANAGER_INDEX_TX);
        }
    }

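    /// Service UART interrupts by inspecting the asserted events: read any
    /// received data and resume or finish an ongoing transmission.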
    pub fn service_interrupt(&self) {
        let ev = self.uart_regs.ev();

        if ev.event_asserted(EVENT_MANAGER_INDEX_RX) {
            // We cannot clear the event here, as that would discard
            // data from the UART RX FIFO

            self.rx_data();
        }

        if ev.event_asserted(EVENT_MANAGER_INDEX_TX) {
            ev.clear_event(EVENT_MANAGER_INDEX_TX);
            self.resume_tx();
        }
    }

    fn deferred_rx_abort(&self) {
        // The RX event has already been disabled
        // Just return the buffer to the client
        let buffer = self.rx_buffer.take().unwrap(); // Unwrap fail = no rx buffer
        let progress = self.rx_progress.get();

        self.rx_client.map(move |client| {
            client.received_buffer(buffer, progress, Err(ErrorCode::CANCEL), uart::Error::None)
        });
    }

    fn rx_data(&self) {
        // New data is available for reception
        let ev = self.uart_regs.ev();
        let buffer = self.rx_buffer.take().unwrap(); // Unwrap fail = no rx buffer
        let len = self.rx_len.get();
        let mut progress = self.rx_progress.get();

        // Read all available data, until we've reached the length limit
        while {
            !ReadRegWrapper::wrap(&self.uart_regs.rxempty).is_set(rxempty::empty) && progress < len
        } {
            buffer[progress] = ReadRegWrapper::wrap(&self.uart_regs.rxtx).read(rxtx::data);
            progress += 1;

            // Mark the byte as read by acknowledging the event
            ev.clear_event(EVENT_MANAGER_INDEX_RX);
        }

        // Check whether we've reached the length limit and, if so, call
        // back to the client
        if progress == len {
            // Disable RX events
            self.uart_regs.ev().disable_event(EVENT_MANAGER_INDEX_RX);
            self.rx_client
                .map(move |client| client.received_buffer(buffer, len, Ok(()), uart::Error::None));
        } else {
            self.rx_buffer.replace(buffer);
            self.rx_progress.set(progress);
        }
    }

    fn deferred_tx_abort(&self) {
        // The TX event has already been disabled
        // Just return the buffer to the client
        let buffer = self.tx_buffer.take().unwrap(); // Unwrap fail = no tx buffer
        let progress = self.tx_progress.get();

        self.tx_client
            .map(move |client| client.transmitted_buffer(buffer, progress, Err(ErrorCode::CANCEL)));
    }

    // This is either invoked from a deferred call or from a
    // hardware-generated interrupt, hence it is guaranteed to run in a
    // callback context
    fn resume_tx(&self) {
        // Context: when called from an interrupt, the event source
        // has already been cleared

        let len = self.tx_len.get();
        let mut progress = self.tx_progress.get();
        let buffer = self.tx_buffer.take().unwrap(); // Unwrap fail = no tx buffer

        // Try to transmit any remaining data

        // Store this to check whether we will get another interrupt
        //
        // An interrupt will be generated if fifo_full is true
        // (i.e. the fifo limit has been reached) OR if after the
        // while loop, the TX event is already pending (meaning we've
        // reached the fifo limit AND the end of operation at the same
        // time, but the hardware has managed to transmit a byte
        // before we had a chance to read `fifo_full`)
        let mut fifo_full: bool;
        while {
            fifo_full = ReadRegWrapper::wrap(&self.uart_regs.txfull).is_set(txfull::full);
            !fifo_full && progress < len
        } {
            WriteRegWrapper::wrap(&self.uart_regs.rxtx).write(rxtx::data.val(buffer[progress]));
            progress += 1;
        }

        if progress < len {
            // If we haven't transmitted all data, we _must_ have
            // reached the fifo-limit
            assert!(fifo_full);

            // Place all information and buffers back for the next
            // call to `resume_tx`, triggered by an interrupt.
            self.tx_progress.set(progress);
            self.tx_buffer.replace(buffer);
        } else if fifo_full || self.uart_regs.ev().event_pending(EVENT_MANAGER_INDEX_TX) {
            // All data is transmitted, but an interrupt will still be
            // generated, for which we wait

            // Place all information and buffers back for the next
            // call to `resume_tx`
            self.tx_progress.set(progress);
            self.tx_buffer.replace(buffer);
        } else {
            // All data is transmitted and we will get no further
            // interrupt
            //
            // Disable TX events until the next transmission and call back to the client
            self.uart_regs.ev().disable_event(EVENT_MANAGER_INDEX_TX);
            self.tx_client
                .map(move |client| client.transmitted_buffer(buffer, len, Ok(())));
        }
    }
}

impl<R: LiteXSoCRegisterConfiguration> uart::Configure for LiteXUart<'_, R> {
    fn configure(&self, params: uart::Parameters) -> Result<(), ErrorCode> {
        // LiteX UART supports only
        // - a fixed width of 8 bits
        // - no parity
        // - 1 stop bit
        // - no hardware flow control
        if let Some((ref phy_regs, system_clock)) = self.phy {
            if params.width != uart::Width::Eight
                || params.parity != uart::Parity::None
                || params.stop_bits != uart::StopBits::One
                || params.hw_flow_control
            {
                Err(ErrorCode::NOSUPPORT)
            } else if params.baud_rate == 0 || params.baud_rate > system_clock {
                Err(ErrorCode::INVAL)
            } else {
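                // Scale the ratio baud_rate / system_clock into a 32-bit
                // fixed-point tuning word for the PHY's baudrate generator.
                // For baud_rate == system_clock the exact result (1 << 32)
                // does not fit into a u32, hence it is saturated to
                // u32::MAX.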
                let tuning_word = if params.baud_rate == system_clock {
                    u32::MAX
                } else {
                    (((params.baud_rate as u64) * (1 << 32)) / (system_clock as u64)) as u32
                };
                phy_regs.tuning_word.set(tuning_word);

                Ok(())
            }
        } else {
            Err(ErrorCode::NOSUPPORT)
        }
    }
}

impl<'a, R: LiteXSoCRegisterConfiguration> uart::Transmit<'a> for LiteXUart<'a, R> {
    fn set_transmit_client(&self, client: &'a dyn uart::TransmitClient) {
        self.tx_client.set(client);
    }

    fn transmit_buffer(
        &self,
        tx_buffer: &'static mut [u8],
        tx_len: usize,
    ) -> Result<(), (ErrorCode, &'static mut [u8])> {
        // Make sure the UART is initialized
        assert!(self.initialized.get());

        if tx_buffer.len() < tx_len {
            return Err((ErrorCode::SIZE, tx_buffer));
        }

        if self.tx_buffer.is_some() {
            return Err((ErrorCode::BUSY, tx_buffer));
        }

        // Enable TX events (interrupts)
        self.uart_regs.ev().clear_event(EVENT_MANAGER_INDEX_TX);
        self.uart_regs.ev().enable_event(EVENT_MANAGER_INDEX_TX);

        // Try to send the buffer
        //
        // If it does not fill the FIFO, an interrupt will _not_ be
        // generated and hence we have to perform the callback using a
        // deferred call.
        //
        // If we fill up the FIFO, an interrupt _will_ be generated. We
        // can transmit the rest using `resume_tx` and directly call the
        // callback there, as we are guaranteed to be in a callback.
        //
        // An interrupt will be generated if fifo_full is true
        // (i.e. the fifo limit has been reached) OR if after the
        // while loop, the TX event is already pending (meaning we've
        // reached the fifo limit AND the end of operation at the same
        // time, but the hardware has managed to transmit a byte
        // before we had a chance to read `fifo_full`)
        let mut fifo_full: bool;
        let mut progress: usize = 0;
        while {
            fifo_full = ReadRegWrapper::wrap(&self.uart_regs.txfull).is_set(txfull::full);
            (progress < tx_len) && !fifo_full
        } {
            WriteRegWrapper::wrap(&self.uart_regs.rxtx).write(rxtx::data.val(tx_buffer[progress]));
            progress += 1;
        }

        // Store the respective values (implicitly marking the device as busy)
        self.tx_progress.set(progress);
        self.tx_len.set(tx_len);
        self.tx_buffer.replace(tx_buffer);
        self.tx_aborted.set(false);

        // If we did not reach the fifo-limit, the entire buffer
        // _must_ have been written to the device
        //
        // In this case, we must request a deferred call for the
        // callback, as an interrupt will not be generated.
        //
        // However, we might have reached the fifo limit but not
        // noticed, as the device has sent a byte between writing rxtx
        // and reading txfull. Hence, if an event is pending, rely on
        // the fact that an interrupt will be generated.
        if !(fifo_full || self.uart_regs.ev().event_pending(EVENT_MANAGER_INDEX_TX)) {
            assert!(progress == tx_len);

            self.tx_deferred_call.set(true);
            self.deferred_call.set();
        }

        // If fifo_full == true, we will get an interrupt

        Ok(())
    }

    fn transmit_word(&self, _word: u32) -> Result<(), ErrorCode> {
        // Make sure the UART is initialized
        assert!(self.initialized.get());

        Err(ErrorCode::FAIL)
    }

    fn transmit_abort(&self) -> Result<(), ErrorCode> {
        // Disable TX events
        //
        // A deferred call might still be pending from the started
        // transmission; however, it will be routed to
        // `deferred_tx_abort` if `tx_aborted` is set

        // Make sure the UART is initialized
        assert!(self.initialized.get());

        self.uart_regs.ev().disable_event(EVENT_MANAGER_INDEX_TX);

        if self.tx_buffer.is_some() {
            self.tx_aborted.set(true);
            self.tx_deferred_call.set(true);
            self.deferred_call.set();

            Err(ErrorCode::BUSY)
        } else {
            Ok(())
        }
    }
}

impl<'a, R: LiteXSoCRegisterConfiguration> uart::Receive<'a> for LiteXUart<'a, R> {
    fn set_receive_client(&self, client: &'a dyn uart::ReceiveClient) {
        self.rx_client.set(client);
    }

    fn receive_buffer(
        &self,
        rx_buffer: &'static mut [u8],
        rx_len: usize,
    ) -> Result<(), (ErrorCode, &'static mut [u8])> {
        // Make sure the UART is initialized
        assert!(self.initialized.get());

        if rx_len > rx_buffer.len() {
            return Err((ErrorCode::SIZE, rx_buffer));
        }

        if self.rx_buffer.is_some() {
            return Err((ErrorCode::BUSY, rx_buffer));
        }

        // Store the slice and length for receiving, set the progress
        // to 0
        self.rx_buffer.replace(rx_buffer);
        self.rx_len.set(rx_len);
        self.rx_progress.set(0);
        self.rx_aborted.set(false);

        // If there is already data in the FIFO but the event is not
        // pending (has been cleared), request a deferred call,
        // otherwise rely on the interrupts
        //
        // This is required as the EventSourceProcess only triggers on
        // a falling edge, which will not happen if the FIFO had valid
        // data left over from the previous transaction.
        if !ReadRegWrapper::wrap(&self.uart_regs.rxempty).is_set(rxempty::empty)
            && !self.uart_regs.ev().event_pending(EVENT_MANAGER_INDEX_RX)
        {
            // We do not enable interrupts just yet, but rely on a
            // deferred call for the bytes left over from a previous
            // transaction in the FIFO
            //
            // Enable the event interrupt in the deferred callback
            // instead! Otherwise we risk double-delivery of the
            // interrupt _and_ the deferred call
            self.rx_deferred_call.set(true);
            self.deferred_call.set();
        } else {
            // We do _not_ clear any pending data in the FIFO by
            // acknowledging previous events
            self.uart_regs.ev().enable_event(EVENT_MANAGER_INDEX_RX);
        }

        Ok(())
    }

    fn receive_word(&self) -> Result<(), ErrorCode> {
        // Make sure the UART is initialized
        assert!(self.initialized.get());
        Err(ErrorCode::FAIL)
    }

    fn receive_abort(&self) -> Result<(), ErrorCode> {
        // Make sure the UART is initialized
        assert!(self.initialized.get());

        // Disable RX events
        self.uart_regs.ev().disable_event(EVENT_MANAGER_INDEX_RX);

        if self.rx_buffer.is_some() {
            // Mark the reception as aborted and request a deferred
            // call
            self.rx_aborted.set(true);
            self.rx_deferred_call.set(true);
            self.deferred_call.set();

            Err(ErrorCode::BUSY)
        } else {
            Ok(())
        }
    }
}

impl<R: LiteXSoCRegisterConfiguration> DeferredCallClient for LiteXUart<'_, R> {
    fn register(&'static self) {
        self.deferred_call.register(self)
    }

    fn handle_deferred_call(&self) {
        // Are we currently in a TX or RX transaction?
        if self.tx_deferred_call.get() {
            self.tx_deferred_call.set(false);
            // Has the transmission been aborted?
            if self.tx_aborted.get() {
                self.deferred_tx_abort();
            } else {
                // The buffer has been completely transmitted in the initial
                // `transmit_buffer` call, finish the operation
                self.resume_tx();
            }
        }

        if self.rx_deferred_call.get() {
            self.rx_deferred_call.set(false);
            // Has the reception been aborted?
            if self.rx_aborted.get() {
                self.deferred_rx_abort();
            } else {
                // The deferred call is used as there is some leftover
                // data in the FIFO from a previous transaction, which
                // won't trigger the falling-edge based
                // EventSourceProcess
                //
                // We need to instead enable interrupts here (can't be
                // done in the original receive_buffer method, as that
                // would risk double-delivery of interrupts and
                // deferred calls)
                self.uart_regs.ev().enable_event(EVENT_MANAGER_INDEX_RX);
                self.rx_data();
            }
        }
    }
}