x86/interrupts/poller.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2024.

use core::ptr;

use tock_cells::volatile_cell::VolatileCell;
use tock_registers::LocalRegisterCopy;

use crate::support;

use super::NUM_VECTORS;

/// A mechanism for synchronously managing and polling x86 interrupts.
///
/// Tock uses synchronous polling to service interrupts. This means that the kernel's main loop will
/// periodically call some function to detect and service interrupts. The reason for this approach
/// (as opposed to doing work directly within ISRs) is that it lets us lean on Rust's borrow checker
/// to avoid race conditions.
///
/// The `InterruptPoller` type provides a somewhat higher-level API for working with x86 interrupts
/// that fits well with Tock's synchronous lifecycle. It is modeled after the `Plic` API from the
/// `e310x` chip crate.
///
/// Internally, it maintains a large bitfield with a separate flag for every possible interrupt
/// vector (`NUM_VECTORS` in total). When an interrupt occurs, a very lightweight ISR sets the
/// corresponding flag. To poll for pending interrupts from within the kernel loop, we simply
/// iterate over this bitfield and return the index of each active bit.
///
/// Note that for safety reasons, `InterruptPoller` is a singleton. You cannot create an instance
/// directly. Instead, you must access the singleton instance using `InterruptPoller::access`.
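///
/// # Example
///
/// A minimal sketch (not taken from a real board crate) of how a kernel service loop might drain
/// pending interrupts; dispatching to the actual peripheral drivers is elided:
///
/// ```ignore
/// while let Some(num) = InterruptPoller::access(|poller| poller.next_pending()) {
///     // ... dispatch vector `num` to the appropriate peripheral driver here ...
///     InterruptPoller::access(|poller| poller.clear_pending(num));
/// }
/// ```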
pub struct InterruptPoller {
    /// Tracks the pending status of each interrupt.
    pending: [VolatileCell<LocalRegisterCopy<u32>>; NUM_VECTORS / 32],
}

/// The singleton poller instance.
///
/// We use a `static mut` singleton so that the instance can be accessed directly from interrupt
/// handler routines.
///
/// ## Safety
///
/// As with any `static mut` item, the poller singleton must not be accessed concurrently. To
/// enforce this restriction, this module exposes two constrained methods for accessing the
/// instance: `InterruptPoller::access` and `InterruptPoller::set_pending`.
static mut SINGLETON: InterruptPoller = InterruptPoller {
    pending: [
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
        VolatileCell::new(LocalRegisterCopy::new(0)),
    ],
};

impl InterruptPoller {
    /// Provides safe access to the singleton instance of `InterruptPoller`.
    ///
    /// The given closure `f` is executed with interrupts disabled (using
    /// [`support::atomic`](crate::support::atomic)) and passed a reference to the singleton.
    pub fn access<F, R>(f: F) -> R
    where
        F: FnOnce(&InterruptPoller) -> R,
    {
        support::atomic(|| {
            // Safety: Interrupts are disabled within this closure, so we can safely access the
            // singleton without racing against interrupt handlers.
            let poller = unsafe { &*ptr::addr_of!(SINGLETON) };

            f(poller)
        })
    }

    /// Marks the specified interrupt as pending.
    ///
    /// ## Safety
    ///
    /// Interrupts must be disabled when this function is called. This function is _intended_ to be
    /// called from within an ISR, so hopefully this is already true.
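    ///
    /// # Example
    ///
    /// A sketch of how a low-level handler stub might record an interrupt; the vector number `32`
    /// is purely illustrative:
    ///
    /// ```ignore
    /// // Interrupts are already disabled while the ISR runs, so the safety requirement holds.
    /// unsafe {
    ///     InterruptPoller::set_pending(32);
    /// }
    /// ```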
    pub unsafe fn set_pending(num: u32) {
        // Safety: Caller ensures interrupts are disabled when this function is called, so it
        // should be safe to access the singleton without racing against any interrupt
        // handlers.
        let poller = unsafe { &*ptr::addr_of!(SINGLETON) };

        let arr_idx = (num / 32) as usize;
        let bit_idx = num % 32;

        let new_val = poller.pending[arr_idx].get().get() | 1 << bit_idx;
        poller.pending[arr_idx].set(LocalRegisterCopy::new(new_val));
    }

    /// Polls for the next pending interrupt.
    ///
    /// If multiple interrupts are currently pending, then the highest-priority (i.e. numerically
    /// lowest) vector is returned.
    ///
    /// Once an interrupt has been handled, call `clear_pending` to clear its pending status so
    /// that lower-priority interrupts can be serviced.
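    ///
    /// For example, if vector 33 is the only pending interrupt, then bit 1 of `pending[1]` is
    /// set and this method returns `Some(33)`.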
    pub fn next_pending(&self) -> Option<u32> {
        for (i, pending) in self.pending.iter().enumerate() {
            let val = pending.get().get();
            if val != 0 {
                return Some(val.trailing_zeros() + (i as u32 * 32));
            }
        }

        None
    }

    /// Clears the pending status of the specified interrupt, allowing lower-priority interrupts
    /// to be serviced.
    ///
    /// Don't forget to call this method after servicing an interrupt.
    pub fn clear_pending(&self, num: u32) {
        let arr_idx = (num / 32) as usize;
        let bit_idx = num % 32;

        let new_val = self.pending[arr_idx].get().get() & !(1 << bit_idx);
        self.pending[arr_idx].set(LocalRegisterCopy::new(new_val));
    }
}