// capsules_core/console_ordered.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Provides userspace with access to a serial interface whose output
6//! is in-order with respect to kernel debug!() operations.
7//!
//! Prints to the console are atomic up to a particular constant length,
9//! which can be set at capsule instantiation.
10//!
11//! Note that this capsule does *not* buffer writes in an additional
12//! buffer; this is critical to ensure ordering. Instead, it pushes
13//! writes into the kernel debug buffer. If there is insufficient space
14//! in the buffer for the write (or an atomic block size chunk of a very
15//! large write), the capsule waits and uses a retry timer. This means
16//! that in-kernel debug statements can starve userspace prints, e.g.,
17//! if they always keep the kernel debug buffer full.
18//!
19//! Setup
20//! -----
21//!
22//! This capsule allows userspace programs to print to the kernel
23//! debug log. This ensures that (as long as the writes are not
//! truncated) kernel and userspace print operations are in
25//! order. It requires a reference to an Alarm for timers to issue
26//! callbacks and send more data. The three configuration constants are:
27//!   - ATOMIC_SIZE: the minimum block of buffer that will be sent. If there is
28//!     not enough space in the debug buffer to send ATOMIC_SIZE bytes, the
29//!     console retries later.
30//!   - RETRY_TIMER: if there is not enough space in the debug buffer to send
31//!     the next chunk of a write, the console waits RETRY_TIMER ticks of the
32//!     supplied alarm.
33//!   - WRITE_TIMER: after completing a write, the console waits WRITE_TIMER
34//!     ticks of the supplied alarm before issuing a callback or writing more.
35//!
36//! RETRY_TIMER and WRITE_TIMER should be set based on the speed of
37//! the underlying UART and desired load. Generally speaking, setting
38//! them around 50-100 byte times is good. For example, this means on
39//! a 115200 UART, setting them to 5ms (576 bits, or 72 bytes) is
40//! reasonable. ATOMIC_SIZE should be at least 80 (row width
41//! of a standard console).
42//!
43//! ```rust,ignore
44//! # use kernel::static_init;
45//! # use capsules_core::console_ordered::ConsoleOrdered;
46//! let console = static_init!(
47//!     ConsoleOrdered,
48//!     ConsoleOrdered::new(virtual_alarm,
49//!                         board_kernel.create_grant(capsules_core::console_ordered::DRIVER_NUM,
50//!                                                   &grant_cap),
51//!                         ATOMIC_SIZE,
52//!                         RETRY_TIMER,
53//!                         WRITE_TIMER));
54//!
55//! ```
56//!
57//! Usage
58//! -----
59//!
60//! The user must perform three steps in order to write a buffer:
61//!
62//! ```c
63//! // (Optional) Set a callback to be invoked when the buffer has been written
64//! subscribe(CONSOLE_DRIVER_NUM, 1, my_callback);
65//! // Share the buffer from userspace with the driver
66//! allow(CONSOLE_DRIVER_NUM, buffer, buffer_len_in_bytes);
67//! // Initiate the transaction
68//! command(CONSOLE_DRIVER_NUM, 1, len_to_write_in_bytes)
69//! ```
70//!
71
72use core::cell::Cell;
73use core::cmp;
74
75use kernel::debug::debug_available_len;
76use kernel::debug_process_slice;
77
78use kernel::grant::{AllowRoCount, AllowRwCount, Grant, GrantKernelData, UpcallCount};
79use kernel::hil::time::{Alarm, AlarmClient, ConvertTicks};
80use kernel::hil::uart;
81use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
82use kernel::syscall::{CommandReturn, SyscallDriver};
83use kernel::utilities::cells::{OptionalCell, TakeCell};
84use kernel::{ErrorCode, ProcessId};
85
/// Syscall driver number.
use crate::driver;
/// Console's driver number, taken from the capsule driver-number registry
/// (`driver::NUM::Console`), so it matches the other console capsules.
pub const DRIVER_NUM: usize = driver::NUM::Console as usize;
89
/// Ids for read-only allow buffers
mod ro_allow {
    /// Read-only buffer holding the bytes a process asks to print (putstr).
    ///
    /// Before the allow syscall was handled by the kernel,
    /// console used allow number "1", so to preserve compatibility
    /// we still use allow number 1 now.
    pub const WRITE: usize = 1;
    /// The number of read-allow buffers (for putstr) the kernel stores for this grant.
    pub const COUNT: u8 = 2;
}
99
/// Ids for read-write allow buffers
mod rw_allow {
    /// Read-write buffer that received bytes are copied into (getnstr).
    ///
    /// Before the allow syscall was handled by the kernel,
    /// console used allow number "1", so to preserve compatibility
    /// we still use allow number 1 now.
    pub const READ: usize = 1;
    /// The number of read-write allow buffers (for getstr) the kernel stores for this grant.
    pub const COUNT: u8 = 2;
}
109
/// Per-process console state, stored in this capsule's grant region.
/// `Default` gives the all-zero / all-false "idle" state on first entry.
#[derive(Default)]
pub struct App {
    write_position: usize, // Current write position within the current write
    write_len: usize,      // Length of total write
    writing: bool,         // Are we in the midst of a write
    pending_write: bool,   // Are we waiting (queued) to write
    tx_counter: usize,     // Sequence number used to keep order of writes
    read_len: usize,       // Outstanding read length (0 means no read pending)
    rx_counter: usize,     // Sequence number used to order reads (no starvation)
}
120
/// In-order console capsule: per-process state lives in the grant; the
/// fields here are global transmit/receive bookkeeping shared by all apps.
pub struct ConsoleOrdered<'a, A: Alarm<'a>> {
    uart: &'a dyn uart::Receive<'a>, // Only the receive half is used; writes go to the debug buffer
    apps: Grant<
        App,
        UpcallCount<3>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,
    tx_in_progress: Cell<bool>, // If true there's an ongoing write so others must wait
    tx_counter: Cell<usize>,    // Sequence number for writes from different processes
    alarm: &'a A,               // Timer for trying to send more

    rx_counter: Cell<usize>,    // Sequence number for reads from different processes
    rx_in_progress: OptionalCell<ProcessId>, // Process whose receive is outstanding
    rx_buffer: TakeCell<'static, [u8]>, // Static buffer handed to the UART for receives

    atomic_size: Cell<usize>, // The maximum size write the capsule promises atomicity;
    // larger writes may be broken into atomic_size chunks.
    // This must be smaller than the debug buffer size or a long
    // write may never print.
    retry_timer: Cell<u32>, // How long the capsule will wait before retrying if there
    // is insufficient space in the debug buffer (alarm ticks)
    // when a write is first attempted.
    write_timer: Cell<u32>, // Time to wait after a successful write into the debug buffer,
                            // before checking whether write more or issue a callback that
                            // the current write has completed (alarm ticks).
}
148
impl<'a, A: Alarm<'a>> ConsoleOrdered<'a, A> {
    /// Construct a new ordered console.
    ///
    /// - `uart`: receive-only UART used for getnstr; prints go through the
    ///   kernel debug buffer instead.
    /// - `alarm`: timer used for retry/completion callbacks.
    /// - `rx_buffer`: static buffer loaned to the UART during receives; its
    ///   length caps the maximum receive size.
    /// - `atomic_size`, `retry_timer`, `write_timer`: see the module docs.
    pub fn new(
        uart: &'a dyn uart::Receive<'a>,
        alarm: &'a A,
        rx_buffer: &'static mut [u8],
        grant: Grant<
            App,
            UpcallCount<3>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        atomic_size: usize,
        retry_timer: u32,
        write_timer: u32,
    ) -> ConsoleOrdered<'a, A> {
        ConsoleOrdered {
            uart,
            apps: grant,
            tx_in_progress: Cell::new(false),
            tx_counter: Cell::new(0),
            alarm,

            rx_counter: Cell::new(0),
            rx_in_progress: OptionalCell::empty(),
            rx_buffer: TakeCell::new(rx_buffer),

            atomic_size: Cell::new(atomic_size),
            retry_timer: Cell::new(retry_timer),
            write_timer: Cell::new(write_timer),
        }
    }

    /// Internal helper function for starting up a new print; allocate a sequence number and
    /// start the send state machine.
    ///
    /// Returns `BUSY` if this app already has a write in flight or queued,
    /// and `NOMEM` if there is nothing to write (empty allow buffer or
    /// `len == 0`). On success the write is either started immediately or
    /// queued (with a retry alarm if there was no debug-buffer space).
    fn send_new(
        &self,
        app: &mut App,
        kernel_data: &GrantKernelData,
        len: usize,
    ) -> Result<(), ErrorCode> {
        // We are already writing
        if app.writing || app.pending_write {
            return Err(ErrorCode::BUSY);
        }
        app.write_position = 0;
        // Write length is capped by the allowed buffer's length.
        app.write_len = kernel_data
            .get_readonly_processbuffer(ro_allow::WRITE)
            .map_or(0, |write| write.len())
            .min(len);
        // We have nothing to write
        if app.write_len == 0 {
            return Err(ErrorCode::NOMEM);
        }
        // Order the prints through a global counter.
        app.tx_counter = self.tx_counter.get();
        self.tx_counter.set(app.tx_counter.wrapping_add(1));

        let debug_space_avail = debug_available_len();

        if self.tx_in_progress.get() {
            // A prior print is outstanding, enqueue
            app.pending_write = true;
        } else if app.write_len <= debug_space_avail {
            // Space for the full write, make it
            app.write_position = self.send(app, kernel_data).map_or(0, |len| len);
        } else if self.atomic_size.get() <= debug_space_avail {
            // Space for a partial write, make it
            app.write_position = self.send(app, kernel_data).map_or(0, |len| len);
        } else {
            // No space even for a partial, minimum size write: enqueue
            // and retry when the alarm fires.
            app.pending_write = true;
            self.alarm.set_alarm(
                self.alarm.now(),
                self.alarm.ticks_from_ms(self.retry_timer.get()),
            );
        }
        Ok(())
    }

    /// Internal helper function for sending data. Assumes that there is enough
    /// space in the debug buffer for the write. Writes longer than available
    /// debug buffer space will be truncated, so callers that wish to not lose
    /// data must check before calling.
    ///
    /// Returns the number of bytes pushed into the debug buffer; the caller
    /// adds this to `app.write_position`. Always arms the write timer so the
    /// state machine advances (more data or completion upcall) from `alarm()`.
    fn send(
        &self,
        app: &mut App,
        kernel_data: &GrantKernelData,
    ) -> Result<usize, kernel::process::Error> {
        // We can ignore the Result because if the call fails, it means
        // the process has terminated, so issuing a callback doesn't matter.
        // If the call fails, just use the alarm to try the next client.
        let res = kernel_data
            .get_readonly_processbuffer(ro_allow::WRITE)
            .and_then(|write| {
                write.enter(|data| {
                    // The slice might have become shorter than the requested
                    // write; if so, just write what there is.
                    let remaining_len = app.write_len - app.write_position;
                    let real_write_len = cmp::min(remaining_len, debug_available_len());
                    let this_write_end = app.write_position + real_write_len;
                    let remaining_data = match data.get(app.write_position..this_write_end) {
                        Some(remaining_data) => remaining_data,
                        None => data,
                    };

                    app.writing = true;
                    self.tx_in_progress.set(true);
                    if real_write_len > 0 {
                        // If the debug printing fails we have no recourse. Just
                        // pretend like we wrote everything so we make forward
                        // progress, and maybe the debug writer will start
                        // working in the future.
                        debug_process_slice!(remaining_data).unwrap_or(real_write_len)
                    } else {
                        0
                    }
                })
            });
        // Start a timer to signal completion of this write
        // and potentially write more.
        self.alarm.set_alarm(
            self.alarm.now(),
            self.alarm.ticks_from_ms(self.write_timer.get()),
        );
        res
    }

    /// Internal helper function for starting a receive operation. Processes
    /// do not share reads, they take turns, with turn order monitored through
    /// a sequence number.
    ///
    /// Returns `BUSY` if this app already has a read outstanding, `INVAL`
    /// for zero-length reads or reads larger than the static `rx_buffer`.
    /// If the UART is busy the read is queued with an rx sequence number.
    fn receive_new(
        &self,
        processid: ProcessId,
        app: &mut App,
        kernel_data: &GrantKernelData,
        len: usize,
    ) -> Result<(), ErrorCode> {
        if app.read_len != 0 {
            // We are busy reading, don't try again
            Err(ErrorCode::BUSY)
        } else if len == 0 {
            //  Cannot read length 0
            Err(ErrorCode::INVAL)
        } else if self.rx_buffer.is_none() {
            // Console is busy receiving, so enqueue
            app.rx_counter = self.rx_counter.get();
            self.rx_counter.set(app.rx_counter + 1);
            app.read_len = len;
            Ok(())
        } else {
            // App can try to start a read
            let read_len = kernel_data
                .get_readwrite_processbuffer(rw_allow::READ)
                .map_or(0, |read| read.len())
                .min(len);
            if read_len > self.rx_buffer.map_or(0, |buf| buf.len()) {
                // For simplicity, impose a small maximum receive length
                // instead of doing incremental reads
                Err(ErrorCode::INVAL)
            } else {
                // Note: We have ensured above that rx_buffer is present
                app.read_len = read_len;
                self.rx_buffer.take().map(|buffer| {
                    self.rx_in_progress.set(processid);
                    let _ = self.uart.receive_buffer(buffer, app.read_len);
                });
                Ok(())
            }
        }
    }
}
320
impl<'a, A: Alarm<'a>> AlarmClient for ConsoleOrdered<'a, A> {
    /// Drive the write state machine: finish or continue the in-progress
    /// write, then (if idle) start the queued write with the earliest
    /// sequence number.
    fn alarm(&self) {
        if self.tx_in_progress.get() {
            // Clear here and set it later; if .enter fails (process
            // has died) it remains cleared.
            self.tx_in_progress.set(false);

            // Check if the current writer is finished; if so, issue an upcall, if not,
            // try to write more.
            for cntr in self.apps.iter() {
                cntr.enter(|app, kernel_data| {
                    // This is the in-progress write
                    if app.writing {
                        if app.write_position >= app.write_len {
                            // Write complete: upcall 1 reports the total length written.
                            let _res = kernel_data.schedule_upcall(1, (app.write_len, 0, 0));
                            app.writing = false;
                        } else {
                            // Still have more to write, don't allow others to jump in.
                            self.tx_in_progress.set(true);

                            // Promise to write to the end, or the atomic write unit, whichever is smaller
                            let remaining_len = app.write_len - app.write_position;
                            let debug_space_avail = debug_available_len();
                            let minimum_write = cmp::min(remaining_len, self.atomic_size.get());

                            // Write, or if there isn't space for a minimum write, retry later
                            if minimum_write <= debug_space_avail {
                                app.write_position +=
                                    self.send(app, kernel_data).map_or(0, |len| len);
                            } else {
                                self.alarm.set_alarm(
                                    self.alarm.now(),
                                    self.alarm.ticks_from_ms(self.retry_timer.get()),
                                );
                            }
                        }
                    }
                });
            }
        }

        // There's no ongoing send, try to send the next one (process with
        // lowest sequence number).
        if !self.tx_in_progress.get() {
            // Find if there's another writer and mark it busy.
            let mut next_writer: Option<ProcessId> = None;
            let mut seqno = self.tx_counter.get();

            // Find the process that has an outstanding write with the
            // earliest sequence number, handling wraparound.
            for cntr in self.apps.iter() {
                let appid = cntr.processid();
                cntr.enter(|app, _| {
                    if app.pending_write {
                        // Checks whether app.tx_counter is earlier than
                        // seqno, with the constraint that there are <
                        // usize/2 processes. wrapping_sub allows this to
                        // handle wraparound E.g., in 8-bit arithmetic
                        // 0x02 - 0xff = 0x03 and so 0xff is "earlier"
                        // than 0x02. -pal
                        if seqno.wrapping_sub(app.tx_counter) < usize::MAX / 2 {
                            seqno = app.tx_counter;
                            next_writer = Some(appid);
                        }
                    }
                });
            }

            // Restart the chosen writer's state machine; send_new re-checks
            // buffer space and either writes now or re-arms the retry alarm.
            next_writer.map(|pid| {
                self.apps.enter(pid, |app, kernel_data| {
                    app.pending_write = false;
                    let len = app.write_len;
                    let _ = self.send_new(app, kernel_data, len);
                })
            });
        }
    }
}
399
impl<'a, A: Alarm<'a>> SyscallDriver for ConsoleOrdered<'a, A> {
    /// Setup shared buffers.
    ///
    /// ### `allow_num`
    ///
    /// - `1` (read-only): buffer holding the bytes to write (putstr);
    ///   see `ro_allow::WRITE`
    /// - `1` (read-write): buffer received bytes are copied into (getnstr);
    ///   see `rw_allow::READ`
    ///
    /// Setup callbacks.
    ///
    /// ### `subscribe_num`
    ///
    /// - `1`: Write buffer completed callback
    /// - `2`: Read buffer completed callback
    ///
    /// Initiate serial transfers
    ///
    /// ### `command_num`
    ///
    /// - `0`: Driver existence check.
    /// - `1`: Transmits a buffer passed via `allow`, up to the length passed in
    ///   `arg1`
    /// - `2`: Receives into a buffer passed via `allow`, up to the length
    ///   passed in `arg1`
    /// - `3`: Aborts an outstanding receive, if any
    fn command(&self, cmd_num: usize, arg1: usize, _: usize, appid: ProcessId) -> CommandReturn {
        let res = self
            .apps
            .enter(appid, |app, kernel_data| {
                match cmd_num {
                    0 => Ok(()),
                    1 => {
                        // putstr
                        let len = arg1;
                        self.send_new(app, kernel_data, len)
                    }
                    2 => {
                        // getnstr
                        let len = arg1;
                        self.receive_new(appid, app, kernel_data, len)
                    }
                    3 => {
                        // Abort RX
                        let _ = self.uart.receive_abort();
                        Ok(())
                    }
                    _ => Err(ErrorCode::NOSUPPORT),
                }
            })
            .map_err(ErrorCode::from);
        // Flatten the nested Result: an outer Err means the grant enter
        // failed (e.g., process died), an inner Err came from the command.
        match res {
            Ok(Ok(())) => CommandReturn::success(),
            Ok(Err(e)) => CommandReturn::failure(e),
            Err(e) => CommandReturn::failure(e),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
}
456
457impl<'a, A: Alarm<'a>> uart::ReceiveClient for ConsoleOrdered<'a, A> {
458    fn received_buffer(
459        &self,
460        buffer: &'static mut [u8],
461        rx_len: usize,
462        rcode: Result<(), ErrorCode>,
463        error: uart::Error,
464    ) {
465        // First, handle this read, then see if there's another read to process.
466        self.rx_in_progress
467            .take()
468            .map(|processid| {
469                self.apps
470                    .enter(processid, |app, kernel_data| {
471                        // An iterator over the returned buffer yielding only the first `rx_len`
472                        // bytes
473                        let rx_buffer = buffer.iter().take(rx_len);
474                        app.read_len = 0; // Mark that we are no longer reading.
475                        match error {
476                            uart::Error::None | uart::Error::Aborted => {
477                                // Receive some bytes, signal error type and return bytes to process buffer
478                                let count = kernel_data
479                                    .get_readwrite_processbuffer(rw_allow::READ)
480                                    .and_then(|read| {
481                                        read.mut_enter(|data| {
482                                            let mut c = 0;
483                                            for (a, b) in data.iter().zip(rx_buffer) {
484                                                c += 1;
485                                                a.set(*b);
486                                            }
487                                            c
488                                        })
489                                    })
490                                    .unwrap_or(-1);
491
492                                // Make sure we report the same number
493                                // of bytes that we actually copied into
494                                // the app's buffer. This is defensive:
495                                // we shouldn't ever receive more bytes
496                                // than will fit in the app buffer since
497                                // we use the app_buffer's length when
498                                // calling `receive()`. However, a buggy
499                                // lower layer could return more bytes
500                                // than we asked for, and we don't want
501                                // to propagate that length error to
502                                // userspace. However, we do return an
503                                // error code so that userspace knows
504                                // something went wrong.
505                                //
506                                // If count < 0 this means the buffer
507                                // disappeared: return NOMEM.
508                                let read_buffer_len = kernel_data
509                                    .get_readwrite_processbuffer(rw_allow::READ)
510                                    .map_or(0, |read| read.len());
511                                let (ret, received_length) = if count < 0 {
512                                    (Err(ErrorCode::NOMEM), 0)
513                                } else if rx_len > read_buffer_len {
514                                    // Return `SIZE` indicating that
515                                    // some received bytes were dropped.
516                                    // We report the length that we
517                                    // actually copied into the buffer,
518                                    // but also indicate that there was
519                                    // an issue in the kernel with the
520                                    // receive.
521                                    (Err(ErrorCode::SIZE), read_buffer_len)
522                                } else {
523                                    // This is the normal and expected
524                                    // case.
525                                    (rcode, rx_len)
526                                };
527                                let _ = kernel_data.schedule_upcall(
528                                    2,
529                                    (kernel::errorcode::into_statuscode(ret), received_length, 0),
530                                );
531                            }
532                            _ => {
533                                // Some UART error occurred
534                                let _ = kernel_data.schedule_upcall(
535                                    2,
536                                    (
537                                        kernel::errorcode::into_statuscode(Err(ErrorCode::FAIL)),
538                                        0,
539                                        0,
540                                    ),
541                                );
542                            }
543                        }
544                    })
545                    .unwrap_or_default();
546            })
547            .unwrap_or_default();
548
549        // Whatever happens, we want to make sure to replace the rx_buffer for future transactions
550        self.rx_buffer.replace(buffer);
551
552        // Find if there's another reader and if so start reading
553        let mut next_reader: Option<ProcessId> = None;
554        let mut seqno = self.tx_counter.get();
555
556        for cntr in self.apps.iter() {
557            let appid = cntr.processid();
558            cntr.enter(|app, _| {
559                if app.read_len != 0 {
560                    // Checks wither app.tx_counter is earlier than
561                    // seqno, with the constrain that there are <
562                    // usize/2 processes. wrapping_sub allows this to
563                    // handle wraparound E.g., in 8-bit arithmetic
564                    // 0x02 - 0xff = 0x03 and so 0xff is "earlier"
565                    // than 0x02. -pal
566                    if seqno.wrapping_sub(app.rx_counter) < usize::MAX / 2 {
567                        seqno = app.rx_counter;
568                        next_reader = Some(appid);
569                    }
570                }
571            });
572        }
573
574        next_reader.map(|pid| {
575            self.apps.enter(pid, |app, kernel_data| {
576                let len = app.read_len;
577                let _ = self.receive_new(pid, app, kernel_data, len);
578            })
579        });
580    }
581}