kernel/deferred_call.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Hardware-independent kernel interface for deferred calls.
//!
//! This allows any struct in the kernel which implements [`DeferredCallClient`]
//! to set and receive deferred calls, Tock's version of software interrupts.
//!
//! These can be used to implement long-running in-kernel algorithms or software
//! devices that are supposed to work like hardware devices. Essentially, this
//! allows the chip to handle more important interrupts first, and lets a kernel
//! component return up the call stack to the scheduler and be called again
//! automatically at a later point.
//!
//! Initialization
//! --------------
//!
//! Before any [`DeferredCall`]s are created, the internal state used by the
//! implementation must be initialized. Boards must initialize deferred calls
//! by calling either [`initialize_deferred_call_state`] or
//! [`initialize_deferred_call_state_unsafe`]. Depending on the hardware support
//! available (i.e., atomic support), boards may have only one initialization
//! routine available.
//!
//! Boards that must use the unsafe version
//! ([`initialize_deferred_call_state_unsafe`]) must be careful to call it only
//! once, and only from the main execution thread, to meet its safety
//! requirements.
//!
//! Usage
//! -----
//!
//! The `DEFCALLS` array size determines how many [`DeferredCall`]s may be
//! registered. By default this is set to 32. To support more deferred calls,
//! this file would need to be modified to use a larger variable for `BITMASK`
//! (e.g. `BITMASK` could be a u64 and the array size increased to 64). If more
//! than 32 deferred calls are created, the kernel will panic at the beginning
//! of the kernel loop.
//!
//! ```rust
//! use kernel::deferred_call::{DeferredCall, DeferredCallClient};
//! use kernel::static_init;
//!
//! // Don't use this! Use the actual `ThreadIdProvider` from your arch crate.
//! struct DummyThreadIdProvider {}
//! unsafe impl kernel::platform::chip::ThreadIdProvider for DummyThreadIdProvider {
//!     fn running_thread_id() -> usize { 0 }
//! }
//!
//! // Initialize the deferred call mechanism.
//! #[cfg(target_has_atomic = "ptr")]
//! kernel::deferred_call::initialize_deferred_call_state::<DummyThreadIdProvider>();
//! #[cfg(not(target_has_atomic = "ptr"))]
//! unsafe { kernel::deferred_call::initialize_deferred_call_state_unsafe::<DummyThreadIdProvider>(); }
//!
//! struct SomeCapsule {
//!     deferred_call: DeferredCall
//! }
//! impl SomeCapsule {
//!     pub fn new() -> Self {
//!         Self {
//!             deferred_call: DeferredCall::new(),
//!         }
//!     }
//! }
//! impl DeferredCallClient for SomeCapsule {
//!     fn handle_deferred_call(&self) {
//!         // Your action here
//!     }
//!
//!     fn register(&'static self) {
//!         self.deferred_call.register(self);
//!     }
//! }
//!
//! // main.rs or your component must register the capsule with its deferred
//! // call. This should look like:
//! let some_capsule = unsafe { static_init!(SomeCapsule, SomeCapsule::new()) };
//! some_capsule.register();
//! ```
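//!
//! The other half of the mechanism runs in the kernel loop, which drains
//! pending deferred calls via [`DeferredCall::has_tasks`] and
//! [`DeferredCall::service_next_pending`]. A minimal sketch of that side
//! (the exact loop structure and ordering are scheduler-specific):
//!
//! ```rust,ignore
//! use kernel::deferred_call::DeferredCall;
//!
//! // Once, after all components have been created and registered:
//! DeferredCall::verify_setup();
//!
//! loop {
//!     // Service every deferred call that is currently pending before
//!     // doing other work (running processes, sleeping, etc.).
//!     while DeferredCall::has_tasks() {
//!         DeferredCall::service_next_pending();
//!     }
//!
//!     // ... run processes, handle interrupts, sleep, and so on.
//! }
//! ```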

use crate::platform::chip::ThreadIdProvider;
use crate::utilities::cells::MapCell;
use crate::utilities::cells::OptionalCell;
use crate::utilities::single_thread_value::SingleThreadValue;
use core::cell::Cell;
use core::marker::Copy;
use core::marker::PhantomData;

/// This trait should be implemented by clients which need to receive
/// [`DeferredCall`]s.
// This trait is not intended to be used as a trait object; e.g. you should not
// create a `&dyn DeferredCallClient`. The `Sized` supertrait prevents this.
pub trait DeferredCallClient: Sized {
    /// Software interrupt function that is called when the deferred call is
    /// triggered.
    fn handle_deferred_call(&self);

    // This function should be implemented as
    // `self.deferred_call.register(self);`.
    fn register(&'static self);
}

/// This struct serves as a lightweight alternative to the use of trait objects
/// (e.g. `&dyn DeferredCallClient`). Using a trait object would pull in a
/// roughly 20 byte vtable, whereas this alternative stores only the data and
/// function pointers, 8 bytes per instance.
#[derive(Copy, Clone)]
struct DynDefCallRef<'a> {
    data: *const (),
    callback: fn(*const ()),
    _lifetime: PhantomData<&'a ()>,
}

impl<'a> DynDefCallRef<'a> {
    // SAFETY: We define the callback function as being a closure which casts
    // the passed pointer to be the appropriate type (a pointer to `T`) and then
    // calls `T::handle_deferred_call()`. In practice, the closure is optimized
    // away by LLVM when the ABI of the closure and the underlying function are
    // identical, making this zero-cost, but saving us from having to trust that
    // `fn(*const ())` and `fn handle_deferred_call(&self)` will always have the
    // same calling convention for any type.
    fn new<T: DeferredCallClient>(x: &'a T) -> Self {
        Self {
            data: core::ptr::from_ref(x) as *const (),
            callback: |p| unsafe { T::handle_deferred_call(&*p.cast()) },
            _lifetime: PhantomData,
        }
    }
}

impl DynDefCallRef<'_> {
    // More efficient to pass by `self` if we don't have to implement
    // `DeferredCallClient` directly.
    fn handle_deferred_call(self) {
        (self.callback)(self.data)
    }
}

/// Counter for the number of deferred calls that have been created. This is
/// used to track that no more than 32 deferred calls have been created.
// All 3 of the below global statics are accessed only in this file, and all
// accesses are via immutable references. Tock is single threaded, so each will
// only ever be accessed via an immutable reference from the single kernel
// thread. TODO: Once Tock decides on an approach to replace `static mut` with
// some sort of `SyncCell`, migrate all three of these to that approach
// (https://github.com/tock/tock/issues/1545).
static CTR: SingleThreadValue<Cell<usize>> = SingleThreadValue::new(Cell::new(0));

/// This bitmask tracks which of the up to 32 existing deferred calls have been
/// scheduled. Any bit that is set in this mask indicates that the deferred call
/// whose [`DeferredCall::idx`] field equals the index of that bit has been
/// scheduled and not yet serviced.
static BITMASK: SingleThreadValue<Cell<u32>> = SingleThreadValue::new(Cell::new(0));

/// An array that stores references to up to 32 `DeferredCall`s via the low-cost
/// [`DynDefCallRef`].
// This is a 256 byte array, but at least resides in `.bss`.
static DEFCALLS: SingleThreadValue<MapCell<[OptionalCell<DynDefCallRef<'static>>; 32]>> =
    SingleThreadValue::new(MapCell::new([const { OptionalCell::empty() }; 32]));
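
// As the module documentation notes, supporting more than 32 deferred calls
// would require widening the bitmask and growing the array. A hypothetical
// sketch of what that change would look like (not enabled here):
//
//     static BITMASK: SingleThreadValue<Cell<u64>> = SingleThreadValue::new(Cell::new(0));
//     static DEFCALLS: SingleThreadValue<MapCell<[OptionalCell<DynDefCallRef<'static>>; 64]>> =
//         SingleThreadValue::new(MapCell::new([const { OptionalCell::empty() }; 64]));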

/// Initialize the static state used by deferred calls.
///
/// This ensures it can safely be used as a global variable.
#[cfg(target_has_atomic = "ptr")]
pub fn initialize_deferred_call_state<P: ThreadIdProvider>() {
    CTR.bind_to_thread::<P>();
    BITMASK.bind_to_thread::<P>();
    DEFCALLS.bind_to_thread::<P>();
}

/// Initialize the static state used by deferred calls.
///
/// This ensures it can safely be used as a global variable.
///
/// # Safety
///
/// Callers of this function must ensure that this function is never called
/// concurrently with calls to [`initialize_deferred_call_state`] or other calls
/// to [`initialize_deferred_call_state_unsafe`].
pub unsafe fn initialize_deferred_call_state_unsafe<P: ThreadIdProvider>() {
    CTR.bind_to_thread_unsafe::<P>();
    BITMASK.bind_to_thread_unsafe::<P>();
    DEFCALLS.bind_to_thread_unsafe::<P>();
}

/// A deferred call, Tock's version of a software interrupt for kernel code.
pub struct DeferredCall {
    idx: usize,
}

impl DeferredCall {
    /// Create a new deferred call with a unique ID.
    pub fn new() -> Self {
        if let Some(ctr) = CTR.get() {
            let idx = ctr.get();
            ctr.set(idx + 1);
            DeferredCall { idx }
        } else {
            // If this panic occurs, the platform did not call
            // `initialize_deferred_call_state()` or
            // `initialize_deferred_call_state_unsafe()` before creating a
            // DeferredCall.
            //
            // We panic here rather than return an option or result because
            // there is no recourse for the caller. This is an unrecoverable
            // issue in practice and a bug in the kernel. The board must call
            // one of the initialization functions first.
            panic!("DeferredCall state not initialized.");
        }
    }

    // To reduce monomorphization bloat, the non-generic portion of
    // `register()` is factored out into this function.
    #[inline(never)]
    fn register_internal_non_generic(&self, handler: DynDefCallRef<'static>) {
        if let Some(defcalls_cell) = DEFCALLS.get() {
            defcalls_cell.map(|defcalls| {
                if self.idx >= defcalls.len() {
                    // This error will be caught by the scheduler at the
                    // beginning of the kernel loop, which is much better than
                    // panicking here, before the debug writer is set up. It
                    // also allows a single panic for creating too many
                    // deferred calls instead of one panic per registered
                    // client.
                    return;
                }
                defcalls[self.idx].set(handler);
            });
        }
    }

    /// This function registers the passed client with this deferred call, such
    /// that calls to [`DeferredCall::set()`] will schedule a callback on the
    /// [`handle_deferred_call()`](DeferredCallClient::handle_deferred_call)
    /// method of the passed client.
    pub fn register<DC: DeferredCallClient>(&self, client: &'static DC) {
        let handler = DynDefCallRef::new(client);
        self.register_internal_non_generic(handler);
    }

    /// Schedule a deferred callback on the client associated with this deferred
    /// call.
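    ///
    /// For example, a capsule that has registered its deferred call (like
    /// `SomeCapsule` in the module-level example) might schedule it from one of
    /// its own methods. A minimal sketch, assuming a hypothetical
    /// `start_operation()` method and a `deferred_call: DeferredCall` field:
    ///
    /// ```rust,ignore
    /// impl SomeCapsule {
    ///     fn start_operation(&self) {
    ///         // Do the part of the work that must happen immediately...
    ///
    ///         // ...then ask the kernel to invoke `handle_deferred_call()` on
    ///         // this capsule once control returns to the scheduler.
    ///         self.deferred_call.set();
    ///     }
    /// }
    /// ```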
    pub fn set(&self) {
        if let Some(bitmask) = BITMASK.get() {
            bitmask.set(bitmask.get() | (1 << self.idx));
        }
    }

    /// Check if a deferred callback has been set and not yet serviced on this
    /// deferred call.
    pub fn is_pending(&self) -> bool {
        if let Some(bitmask) = BITMASK.get() {
            bitmask.get() & (1 << self.idx) != 0
        } else {
            false
        }
    }

    /// Services and clears the next pending [`DeferredCall`], and returns the
    /// index of the deferred call that was serviced, if any.
    pub fn service_next_pending() -> Option<usize> {
        let defcalls_cell = DEFCALLS.get()?;
        defcalls_cell.map_or(None, |defcalls| {
            let bitmask = BITMASK.get()?;
            let val = bitmask.get();
            if val == 0 {
                None
            } else {
                let bit = val.trailing_zeros() as usize;
                let new_val = val & !(1 << bit);
                bitmask.set(new_val);
                defcalls[bit].map(|dc| {
                    dc.handle_deferred_call();
                    bit
                })
            }
        })
    }

    /// Returns true if any deferred calls are waiting to be serviced, false
    /// otherwise.
    pub fn has_tasks() -> bool {
        BITMASK.get().is_some_and(|b| b.get() != 0)
    }

    /// This function should be called at the beginning of the kernel loop to
    /// verify that deferred calls have been correctly initialized. This
    /// function verifies three things:
    ///
    /// 1. That `DEFCALLS` and the other [`SingleThreadValue`] statics have been
    ///    bound to a thread. This happens during
    ///    [`initialize_deferred_call_state`] or
    ///    [`initialize_deferred_call_state_unsafe`].
    ///
    /// 2. That no more than `DEFCALLS.len()` (currently 32) deferred calls have
    ///    been created, which is the maximum this interface supports.
    ///
    /// 3. That exactly as many deferred calls were registered as were created,
    ///    which helps to catch bugs if board maintainers forget to call
    ///    [`register()`](DeferredCall::register) on a created [`DeferredCall`].
    ///
    /// None of these checks is necessary for soundness, but they are necessary
    /// for confirming that [`DeferredCall`]s will actually be delivered as
    /// expected. This function costs about 300 bytes, so you can remove the
    /// call if you are confident your setup will not exceed 32 deferred calls,
    /// and that all of your components register their deferred calls.
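    ///
    /// A board would typically invoke this once, right before servicing
    /// deferred calls for the first time; a sketch (placement details are
    /// board- and scheduler-specific):
    ///
    /// ```rust,ignore
    /// // After all components (and their deferred calls) have been created
    /// // and registered:
    /// DeferredCall::verify_setup();
    /// ```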
    // Ignore the clippy warning for using `.filter(|opt| opt.is_some())` since
    // we don't actually have an Option (we have an OptionalCell) and
    // IntoIterator is not implemented for OptionalCell.
    #[allow(clippy::iter_filter_is_some)]
    pub fn verify_setup() {
        if let Some(defcalls_cell) = DEFCALLS.get() {
            defcalls_cell.map(|defcalls| {
                if let Some(ctr) = CTR.get() {
                    let num_deferred_calls = ctr.get();
                    let num_registered_calls = defcalls.iter().filter(|opt| opt.is_some()).count();
                    if num_deferred_calls > defcalls.len() {
                        panic!("ERROR: too many deferred calls: {}", num_deferred_calls);
                    } else if num_deferred_calls != num_registered_calls {
                        panic!(
                            "ERROR: {} deferred calls, {} registered. \
A component may have forgotten to register a deferred call.",
                            num_deferred_calls, num_registered_calls
                        );
                    }
                }
            });
        } else {
            // The board must call initialize_deferred_call_state() or
            // initialize_deferred_call_state_unsafe() before creating any
            // deferred calls.
            panic!("ERROR: deferred calls not initialized.");
        }
    }
}