earlgrey/epmp.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! The EarlGrey SoC ePMP implementation.
6//!
7//! Refer to the main [`EarlGreyEPMP`] struct documentation.
8
9use core::cell::Cell;
10use core::fmt;
11use core::marker::PhantomData;
12use kernel::platform::mpu;
13use kernel::utilities::registers::FieldValue;
14use rv32i::csr;
15use rv32i::pmp::{
16 format_pmp_entries, pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG,
17};
18
19// ---------- EarlGrey ePMP implementation named constants ---------------------
20//
21// The ePMP implementation (in part) relies on these constant values. Simply
22// changing them here may break the implementation below.
23const PMP_ENTRIES: usize = 16;
24const PMP_ENTRIES_OVER_TWO: usize = 8;
25const TOR_USER_REGIONS_DEBUG_ENABLE: usize = 4;
26const TOR_USER_REGIONS_DEBUG_DISABLE: usize = 4;
27const TOR_USER_ENTRIES_OFFSET_DEBUG_ENABLE: usize = 0;
28const TOR_USER_ENTRIES_OFFSET_DEBUG_DISABLE: usize = 4;
29
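// The configuration code below writes hand-computed `pmpcfgX` register values
// (e.g. 0x8d for a locked R/X TOR entry, or 0x9B for a locked R/W NAPOT
// entry). As a purely illustrative reading aid (this helper is an addition and
// is not used by the driver), each per-entry pmpcfg octet is composed from its
// bit fields as follows:
#[allow(dead_code)]
const fn example_pmpcfg_octet(l: bool, a: u8, x: bool, w: bool, r: bool) -> u8 {
    ((l as u8) << 7) | ((a & 0b11) << 3) | ((x as u8) << 2) | ((w as u8) << 1) | (r as u8)
}
// For example, `example_pmpcfg_octet(true, 0b01 /* TOR */, true, false, true)`
// yields 0x8d, and `example_pmpcfg_octet(true, 0b11 /* NAPOT */, false, true,
// true)` yields 0x9B.
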
30// ---------- EarlGrey ePMP memory region wrapper types ------------------------
31//
32// These types exist primarily to avoid argument confusion in the
33// [`EarlGreyEPMP`] constructor, which accepts the addresses of these memory
34// regions as arguments. They further encode whether a region must adhere to the
35// `NAPOT` or `TOR` addressing mode constraints:
36
37/// The EarlGrey SOC's flash memory region address range.
38///
39/// Configured in the PMP as a `NAPOT` region.
40#[derive(Copy, Clone, Debug)]
41pub struct FlashRegion(pub NAPOTRegionSpec);
42
43/// The EarlGrey SOC's RAM region address range.
44///
45/// Configured in the PMP as a `NAPOT` region.
46#[derive(Copy, Clone, Debug)]
47pub struct RAMRegion(pub NAPOTRegionSpec);
48
49/// The EarlGrey SOC's MMIO region address range.
50///
51/// Configured in the PMP as a `NAPOT` region.
52#[derive(Copy, Clone, Debug)]
53pub struct MMIORegion(pub NAPOTRegionSpec);
54
55/// The EarlGrey SOC's PMP region specification for the kernel `.text` section.
56///
57/// This is to be made accessible to machine-mode as read-execute. Configured in
58/// the PMP as a `TOR` region.
59#[derive(Copy, Clone, Debug)]
60pub struct KernelTextRegion(pub TORRegionSpec);
61
62/// The EarlGrey SOC's RISC-V Debug Manager memory region.
63///
/// Configured in the PMP as a read/write/execute `NAPOT` region. Because R/W/X
/// regions are not supported under machine-mode lockdown (MML), enabling JTAG
/// debugging requires setting the generic [`EPMPDebugConfig`] argument to
/// [`EPMPDebugEnable`], which configures the ePMP to operate without
/// machine-mode lockdown (MML), but still with the machine-mode whitelist
/// policy (MMWP) enabled.
70#[derive(Copy, Clone, Debug)]
71pub struct RVDMRegion(pub NAPOTRegionSpec);
72
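// As a further illustrative sketch (an addition, not used by this module), the
// RISC-V NAPOT `pmpaddr` encoding that the `NAPOTRegionSpec` values above are
// expected to carry describes a naturally aligned power-of-two region
// `[start, start + size)` (with `size >= 8` bytes) by right-shifting the base
// address by two and setting `size / 8 - 1` trailing one-bits:
#[allow(dead_code)]
const fn example_napot_pmpaddr(start: usize, size: usize) -> usize {
    (start >> 2) | ((size >> 3) - 1)
}
// For example, a 1 KiB region at 0x2000_0000 encodes to
// (0x2000_0000 >> 2) | (1024 / 8 - 1) = 0x0800_007F.
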
73// ---------- EarlGrey SoC ePMP JTAG Debugging Configuration -------------------
74
75/// EarlGrey SoC ePMP JTAG Debugging Configuration
76///
77/// The EarlGrey SoC includes a RISC-V Debug Manager mapped to a NAPOT-aligned
78/// memory region. To use a JTAG-debugger with the EarlGrey SoC, this region
79/// needs to be allowed as R/W/X in the ePMP, at least for machine-mode.
80/// However, the RISC-V ePMP does not support R/W/X regions when in machine-mode
81/// lockdown (MML) mode. Furthermore, with the machine-mode whitelist policy
82/// (MMWP) enabled, machine-mode (the kernel) must be given explicit access for
83/// any memory regions to be accessed.
84///
85/// Thus, to enable debugger access, the following changes have to be made in
86/// the EarlGrey ePMP from its default locked-down configuration:
87///
88/// - Machine-Mode Lockdown (MML) must not be enabled
89///
/// - A locked (machine-mode) PMP memory region must be allocated for the RISC-V
/// Debug Manager (RVDM) and given R/W/X permissions.
///
/// - Locked regions are enforced & locked for both machine-mode and
/// user-mode. This means that we can no longer use locked regions in
/// combination with the machine-mode whitelist policy to take away access
/// permissions from user-mode. Instead, we need to place all user-mode
/// regions as non-locked regions _in front of_ all locked machine-mode
/// regions, and insert a "deny-all" non-locked fallback user-mode region in
/// between to achieve our desired isolation properties.
///
/// As a consequence, because of this "deny-all" user-mode region, we have one
/// fewer memory region available to use as a userspace MPU.
103///
104/// Because all of this is much too complex to implement at runtime (and can't
105/// be reconfigured at runtime once MML is configured), we define a new trait
106/// [`EPMPDebugConfig`] with two implementations [`EPMPDebugEnable`] and
107/// [`EPMPDebugDisable`]. The EPMP implementation is generic over those traits
108/// and can, for instance, advertise a different number of MPU regions available
109/// for userspace. It further contains a method to retrieve the RVDM memory
110/// region's NAPOT address specification irrespective of whether the debug
111/// memory is enabled, and an associated constant to use in the configuration
112/// code (such that the branches not taken can be optimized out).
113pub trait EPMPDebugConfig {
114 /// Whether the debug port shall be enabled or not.
115 const DEBUG_ENABLE: bool;
116
117 /// How many userspace MPU (TOR) regions are available under this
118 /// configuration.
119 const TOR_USER_REGIONS: usize;
120
121 /// The offset where the user-mode TOR PMP entries start. This counts
122 /// "entries", meaning `pmpaddrX` registers. A single "TOR region" uses two
123 /// consecutive "entries".
124 const TOR_USER_ENTRIES_OFFSET: usize;
125}
126
127pub enum EPMPDebugEnable {}
128impl EPMPDebugConfig for EPMPDebugEnable {
129 const DEBUG_ENABLE: bool = true;
130 const TOR_USER_REGIONS: usize = TOR_USER_REGIONS_DEBUG_ENABLE;
131 const TOR_USER_ENTRIES_OFFSET: usize = TOR_USER_ENTRIES_OFFSET_DEBUG_ENABLE;
132}
133
134pub enum EPMPDebugDisable {}
135impl EPMPDebugConfig for EPMPDebugDisable {
136 const DEBUG_ENABLE: bool = false;
137 const TOR_USER_REGIONS: usize = TOR_USER_REGIONS_DEBUG_DISABLE;
138 const TOR_USER_ENTRIES_OFFSET: usize = TOR_USER_ENTRIES_OFFSET_DEBUG_DISABLE;
139}
140
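// Illustrative compile-time cross-checks (added for exposition, mirroring the
// layout tables documented on [`EarlGreyEPMP`] below): the user-mode TOR
// entries must not collide with the locked kernel-mode entries. With the debug
// port enabled, entries 8 and up hold the "deny-all" rule and the locked
// kernel regions; with it disabled, the kernel .text TOR region sits at
// entries 2/3 and the kernel NAPOT regions at entries 12 and up.
#[allow(clippy::assertions_on_constants)]
const _: () = assert!(
    EPMPDebugEnable::TOR_USER_ENTRIES_OFFSET + 2 * EPMPDebugEnable::TOR_USER_REGIONS <= 8
);
#[allow(clippy::assertions_on_constants)]
const _: () = assert!(
    EPMPDebugDisable::TOR_USER_ENTRIES_OFFSET + 2 * EPMPDebugDisable::TOR_USER_REGIONS <= 12
);
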
141/// EarlGrey ePMP Configuration Errors
142#[derive(Debug, Copy, Clone)]
143pub enum EarlGreyEPMPError {
144 /// The ePMP driver cannot be instantiated because of an unexpected
145 /// `mseccfg` register value.
146 InvalidInitialMseccfgValue,
147 /// The ePMP driver cannot be instantiated because of an unexpected `pmpcfg`
148 /// register value (where the `usize` value contains the index of the
149 /// `pmpcfg` register).
150 InvalidInitialPmpcfgValue(usize),
151 /// The ePMP registers do not match their expected values after
152 /// configuration. The system cannot be assumed to be in a secure state.
153 SanityCheckFail,
154}
155
156/// RISC-V ePMP memory protection implementation for the EarlGrey SoC.
157///
158/// The EarlGrey ePMP implementation hard-codes many assumptions about the
159/// behavior and state of the underlying hardware, to reduce complexity of this
160/// codebase, and improve its security, reliability and auditability.
161///
162/// Namely, it makes and checks assumptions about the machine security policy
163/// prior to its initialization routine, locks down the hardware through a
164/// static set of PMP configuration steps, and then exposes a subset of regions
165/// for user-mode protection through the `PMPUserMPU` trait.
166///
/// The EarlGrey ePMP implementation supports JTAG debug-port access through the
/// integrated RISC-V Debug Manager (RVDM) core, which requires R/W/X access to a
/// given region of memory in machine-mode and user-mode. The [`EarlGreyEPMP`]
/// struct accepts a generic [`EPMPDebugConfig`] implementation, which either
/// enables (in the case of [`EPMPDebugEnable`]) or disables
/// ([`EPMPDebugDisable`]) the debug-port access. However, enabling debug-port
/// access can potentially weaken the system's security, as it leaves
/// machine-mode lockdown (MML) disabled and uses an additional PMP region
/// otherwise available to userspace. See the documentation of
/// [`EPMPDebugConfig`] for more information on this.
177///
178/// ## ePMP Region Layout & Configuration (`EPMPDebugDisable` mode)
179///
180/// Because of the machine-mode lockdown (MML) mode, no region can have R/W/X
181/// permissions. The machine-mode whitelist policy (MMWP) further requires all
182/// memory accessed by machine-mode to have a corresponding locked PMP entry
/// defined. Lower-indexed PMP entries have precedence over entries with higher
184/// indices. Under MML mode, a non-locked (user-mode) entry prevents
185/// machine-mode access to that memory. Thus, the ePMP is to be configured in a
186/// "sandwiched" layout (with decreasing precedence):
187///
188/// 1. High-priority machine-mode "lockdown" entries.
189///
190/// These entries are only accessible to machine mode. Once locked, they can
191/// only be changed through a hart reset. Examples for such memory sections
192/// can be the kernel's `.text` or certain RAM (e.g. stack) sections.
193///
194/// 2. Tock's user-mode "MPU"
195///
196/// This section defines entries corresponding to memory sections made
/// accessible to user-mode. These entries are exposed through the
198/// implementation of the `TORUserPMP` trait.
199///
200/// **Effectively, this is Tock's "MPU" sandwiched in between the
201/// high-priority and low-priority PMP sections.**
202///
/// These entries are not locked and must be turned off before the kernel is
/// able to access the memory they cover.
205///
/// This section must take precedence over the lower kernel-mode entries, as
/// its regions are aliased by those kernel-mode entries. Having a locked
/// machine-mode entry take precedence over an aliasing user-mode one would
/// prevent user-mode from accessing the aliased memory.
210///
/// 3. Low-priority machine-mode "accessibility" entries.
///
/// These entries provide the kernel access to memory regions which are
/// (partially) aliased by user-mode regions above. This allows for
/// implementing memory sharing between userspace and the kernel (moving
/// access to user-mode by turning on a region above, and falling back onto
/// these rules when turning the user-mode region off).
///
/// These regions can be coarse (e.g. grant R/W on the entire RAM), but should
/// not provide any excess permissions where not required (e.g. avoid granting
/// R/X on flash memory where only R is required, because the kernel text is
/// already marked as R/X in the high-priority regions above).
223///
224/// Because the ROM_EXT and test ROM set up different ePMP configs, there are
225/// separate initialization routines (`new` and `new_test_rom`) for those
226/// environments.
227///
228/// `new` (only available when the debug-port is disabled) attempts to set up
229/// the following memory protection rules and layout:
230///
/// - `mseccfg` CSR:
232///
233/// ```text
234/// |-----+-----------------------------------------------------------+-------|
235/// | BIT | LABEL | STATE |
236/// |-----+-----------------------------------------------------------+-------|
237/// | 0 | Machine-Mode Lockdown (MML) | 1 |
238/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
239/// | 2 | Rule-Lock Bypass (RLB) | 0 |
240/// |-----+-----------------------------------------------------------+-------|
241/// ```
242///
243/// - `pmpcfgX` / `pmpaddrX` CSRs:
244///
245/// ```text
246/// |-------+----------------------------------------+-----------+---+-------|
247/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
248/// |-------+----------------------------------------+-----------+---+-------|
249/// | 0 | Locked by the ROM_EXT or unused | NAPOT/OFF | X | |
250/// | | | | | |
251/// | 1 | Locked by the ROM_EXT or unused | NAPOT/OFF | X | |
252/// | | | | | |
253/// | 2 | -------------------------------------- | OFF | X | ----- |
254/// | 3 | Kernel .text section | TOR | X | R/X |
255/// | | | | | |
256/// | 4 | / \ | OFF | | |
257/// | 5 | \ Userspace TOR region #0 / | TOR | | ????? |
258/// | | | | | |
259/// | 6 | / \ | OFF | | |
260/// | 7 | \ Userspace TOR region #1 / | TOR | | ????? |
261/// | | | | | |
262/// | 8 | / \ | OFF | | |
263/// | 9 | \ Userspace TOR region #2 / | TOR | | ????? |
264/// | | | | | |
265/// | 10 | / \ | OFF | | |
266/// | 11 | \ Userspace TOR region #3 / | TOR | | ????? |
267/// | | | | | |
268/// | 12 | FLASH (spanning kernel & apps) | NAPOT | X | R |
269/// | | | | | |
270/// | 13 | -------------------------------------- | OFF | X | ----- |
271/// | | | | | |
/// | 14 | MMIO | NAPOT | X | R/W |
/// | | | | | |
/// | 15 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
275/// |-------+----------------------------------------+-----------+---+-------|
276/// ```
277///
278/// `new_test_rom` (only available when the debug-port is disabled) attempts to
279/// set up the following memory protection rules and layout:
280///
/// - `mseccfg` CSR:
282///
283/// ```text
284/// |-----+-----------------------------------------------------------+-------|
285/// | BIT | LABEL | STATE |
286/// |-----+-----------------------------------------------------------+-------|
287/// | 0 | Machine-Mode Lockdown (MML) | 1 |
288/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
289/// | 2 | Rule-Lock Bypass (RLB) | 0 |
290/// |-----+-----------------------------------------------------------+-------|
291/// ```
292///
293/// - `pmpcfgX` / `pmpaddrX` CSRs:
294///
295/// ```text
296/// |-------+---------------------------------------------+-------+---+-------|
297/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
298/// |-------+---------------------------------------------+-------+---+-------|
299/// | 0 | ------------------------------------------- | OFF | X | ----- |
300/// | 1 | Kernel .text section | TOR | X | R/X |
301/// | | | | | |
302/// | 2 | ------------------------------------------- | OFF | X | |
303/// | | | | | |
304/// | 3 | ------------------------------------------- | OFF | X | |
305/// | | | | | |
306/// | 4 | / \ | OFF | | |
307/// | 5 | \ Userspace TOR region #0 / | TOR | | ????? |
308/// | | | | | |
309/// | 6 | / \ | OFF | | |
310/// | 7 | \ Userspace TOR region #1 / | TOR | | ????? |
311/// | | | | | |
312/// | 8 | / \ | OFF | | |
313/// | 9 | \ Userspace TOR region #2 / | TOR | | ????? |
314/// | | | | | |
315/// | 10 | / \ | OFF | | |
316/// | 11 | \ Userspace TOR region #3 / | TOR | | ????? |
317/// | | | | | |
318/// | 12 | ------------------------------------------- | OFF | X | ----- |
319/// | | | | | |
320/// | 13 | FLASH (spanning kernel & apps) | NAPOT | X | R |
321/// | | | | | |
322/// | 14 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
323/// | | | | | |
324/// | 15 | MMIO | NAPOT | X | R/W |
325/// |-------+---------------------------------------------+-------+---+-------|
326/// ```
327///
328/// ## ePMP Region Layout & Configuration (`EPMPDebugEnable` mode)
329///
330/// When enabling the RISC-V Debug Manager (JTAG debug port), the ePMP must be
331/// configured differently. This is because the `RVDM` requires a memory section
332/// to be mapped with read-write-execute privileges, which is not possible under
/// the machine-mode lockdown (MML) mode. However, simply disabling MML in the
/// above policy would grant userspace access to kernel memory through the
/// locked PMP entries. We still need to define locked PMP entries to grant
336/// the kernel (machine-mode) access to its required memory regions, as the
337/// machine-mode whitelist policy (MMWP) is enabled.
338///
/// Thus we split the PMP entries into three parts, as outlined in the
340/// following:
341///
342/// 1. Tock's user-mode "MPU"
343///
344/// This section defines entries corresponding to memory sections made
/// accessible to user-mode. These entries are exposed through the
/// implementation of the `TORUserPMP` trait.
///
/// These entries are not locked. Because the machine-mode lockdown (MML)
/// mode is not enabled, non-locked regions are ignored in machine-mode. The
/// kernel does not have to disable these entries prior to being able to
/// access them.
///
/// This section must take precedence over the lower kernel-mode entries, as
/// its regions are aliased by those kernel-mode entries. Having a locked
/// machine-mode entry take precedence over an aliasing user-mode one would
/// prevent user-mode from accessing the aliased memory.
357///
358/// 2. User-mode "deny-all" rule.
359///
/// Without machine-mode lockdown (MML) mode, locked regions apply to both
/// user- and kernel-mode. Because the machine-mode whitelist policy (MMWP)
/// is enabled, the kernel must be granted explicit permission to access
/// memory (default-deny policy) through locked entries, and those entries
/// would be accessible to user-mode as well. This means that we must prevent
/// any user-mode access from "falling through" to these kernel-mode regions.
/// For this purpose, we insert a non-locked "deny-all" rule which disallows
/// all user-mode accesses to the entire address space, if no other
/// higher-priority user-mode rule matches.
368///
/// 3. Machine-mode "accessibility" entries.
///
/// These entries provide the kernel access to certain memory regions, as
/// required by the machine-mode whitelist policy (MMWP).
373///
374/// `new_debug` (only available when the debug-port is enabled) attempts to set
375/// up the following memory protection rules and layout:
376///
/// - `mseccfg` CSR:
378///
379/// ```text
380/// |-----+-----------------------------------------------------------+-------|
381/// | BIT | LABEL | STATE |
382/// |-----+-----------------------------------------------------------+-------|
383/// | 0 | Machine-Mode Lockdown (MML) | 0 |
384/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
385/// | 2 | Rule-Lock Bypass (RLB) | 0 |
386/// |-----+-----------------------------------------------------------+-------|
387/// ```
388///
389/// - `pmpcfgX` / `pmpaddrX` CSRs:
390///
391/// ```text
392/// |-------+---------------------------------------------+-------+---+-------|
393/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
394/// |-------+---------------------------------------------+-------+---+-------|
395/// | 0 | / \ | OFF | | |
396/// | 1 | \ Userspace TOR region #0 / | TOR | | ????? |
397/// | | | | | |
398/// | 2 | / \ | OFF | | |
399/// | 3 | \ Userspace TOR region #1 / | TOR | | ????? |
400/// | | | | | |
401/// | 4 | / \ | OFF | | |
402/// | 5 | \ Userspace TOR region #2 / | TOR | | ????? |
403/// | | | | | |
404/// | 6 | / \ | OFF | | |
405/// | 7 | \ Userspace TOR region #3 / | TOR | | ????? |
406/// | | | | | |
407/// | 8 | ------------------------------------------- | OFF | | ----- |
408/// | | | | | |
409/// | 9 | "Deny-all" user-mode rule (all memory) | NAPOT | | ----- |
410/// | | | | | |
411/// | 10 | ------------------------------------------- | OFF | X | ----- |
412/// | 11 | Kernel .text section | TOR | X | R/X |
413/// | | | | | |
414/// | 12 | RVDM Debug Core Memory | NAPOT | X | R/W/X |
415/// | | | | | |
416/// | 13 | FLASH (spanning kernel & apps) | NAPOT | X | R |
417/// | | | | | |
418/// | 14 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
419/// | | | | | |
420/// | 15 | MMIO | NAPOT | X | R/W |
421/// |-------+---------------------------------------------+-------+---+-------|
422/// ```
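///
/// ## Usage
///
/// A board crate is expected to instantiate this struct with its desired
/// handover-check and debug settings, roughly as in the following sketch
/// (illustrative only; the `*_spec` region values are hypothetical and must be
/// provided by the board):
///
/// ```rust,ignore
/// let epmp = unsafe {
///     EarlGreyEPMP::<{ /* HANDOVER_CONFIG_CHECK */ true }, EPMPDebugDisable>::new(
///         FlashRegion(flash_napot_spec),
///         RAMRegion(ram_napot_spec),
///         MMIORegion(mmio_napot_spec),
///         KernelTextRegion(kernel_text_tor_spec),
///     )
///     .unwrap()
/// };
/// ```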
423pub struct EarlGreyEPMP<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig> {
424 user_pmp_enabled: Cell<bool>,
425 // We can't use our generic parameter to determine the length of the
426 // TORUserPMPCFG array (missing `generic_const_exprs` feature). Thus we
427 // always assume that the debug-port is disabled and we can fit
428 // `TOR_USER_REGIONS_DEBUG_DISABLE` user-mode TOR regions.
429 shadow_user_pmpcfgs: [Cell<TORUserPMPCFG>; TOR_USER_REGIONS_DEBUG_DISABLE],
430 _pd: PhantomData<DBG>,
431}
432
433impl<const HANDOVER_CONFIG_CHECK: bool> EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugDisable> {
434 pub unsafe fn new(
435 flash: FlashRegion,
436 ram: RAMRegion,
437 mmio: MMIORegion,
438 kernel_text: KernelTextRegion,
439 ) -> Result<Self, EarlGreyEPMPError> {
440 use kernel::utilities::registers::interfaces::{Readable, Writeable};
441
442 // --> We start with the "high-priority" ("lockdown") section of the
443 // ePMP configuration:
444
445 // Provide R/X access to the kernel .text as passed to us above.
446 // Allocate a TOR region in PMP entries 2 and 3:
447 csr::CSR.pmpaddr2.set(kernel_text.0.pmpaddr_a());
448 csr::CSR.pmpaddr3.set(kernel_text.0.pmpaddr_b());
449
450 // Set the appropriate `pmpcfg0` register value:
451 //
        // 0x80 = 0b10000000, for the start address of the kernel .text TOR
        // entry, as well as entries 0 and 1.
454 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
455 //
456 // 0x8d = 0b10001101, for kernel .text TOR region
457 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
458 //
459 // Note that we try to lock entries 0 and 1 into OFF mode. If the
460 // ROM_EXT set these up and locked them, this will do nothing, otherwise
461 // it will permanently disable these entries (preventing them from being
462 // misused later).
463 csr::CSR.pmpcfg0.set(0x8d_80_80_80);
464
465 // --> Continue with the "low-priority" ("accessibility") section of the
466 // ePMP configuration:
467
468 // Configure a Read-Only NAPOT region for the entire flash (spanning
        // kernel & apps, but overlaid by the R/X kernel text TOR section)
470 csr::CSR.pmpaddr12.set(flash.0.pmpaddr());
471
472 // Configure a Read-Write NAPOT region for MMIO.
473 csr::CSR.pmpaddr14.set(mmio.0.pmpaddr());
474
475 // Configure a Read-Write NAPOT region for the entire RAM (spanning
476 // kernel & apps)
477 csr::CSR.pmpaddr15.set(ram.0.pmpaddr());
478
479 // With the FLASH, RAM and MMIO configured in separate regions, we can
480 // activate this new configuration, and further adjust the permissions
481 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
        // as required for RAM and MMIO:
483 //
484 // 0x99 = 0b10011001, for FLASH NAPOT region
485 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
486 //
487 // 0x80 = 0b10000000, for the unused region
488 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
489 //
490 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
491 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
492 csr::CSR.pmpcfg3.set(0x9B_9B_80_99);
493
494 // Ensure that the other pmpcfgX CSRs are cleared:
495 csr::CSR.pmpcfg1.set(0x00000000);
496 csr::CSR.pmpcfg2.set(0x00000000);
497
498 // ---------- PMP machine CSRs configured, lock down the system
499
500 // Finally, enable machine-mode lockdown.
501 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
502 csr::CSR.mseccfg.set(0x00000003);
503
504 // ---------- System locked down, cross-check config
505
506 // Now, cross-check that the CSRs have the expected values. This acts as
507 // a sanity check, and can also help to protect against some set of
508 // fault-injection attacks. These checks can't be optimized out by the
509 // compiler, as they invoke assembly underneath which is not marked as
510 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
511 //
512 // Note that different ROM_EXT versions configure entries 0 and 1
513 // differently, so we only confirm they are locked here.
514 if csr::CSR.mseccfg.get() != 0x00000003
515 || (csr::CSR.pmpcfg0.get() & 0xFFFF8080) != 0x8d808080
516 || csr::CSR.pmpcfg1.get() != 0x00000000
517 || csr::CSR.pmpcfg2.get() != 0x00000000
518 || csr::CSR.pmpcfg3.get() != 0x9B9B8099
519 || csr::CSR.pmpaddr2.get() != kernel_text.0.pmpaddr_a()
520 || csr::CSR.pmpaddr3.get() != kernel_text.0.pmpaddr_b()
521 || csr::CSR.pmpaddr12.get() != flash.0.pmpaddr()
522 || csr::CSR.pmpaddr14.get() != mmio.0.pmpaddr()
523 || csr::CSR.pmpaddr15.get() != ram.0.pmpaddr()
524 {
525 return Err(EarlGreyEPMPError::SanityCheckFail);
526 }
527
528 // The ePMP hardware was correctly configured, build the ePMP struct:
529 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
530 Ok(EarlGreyEPMP {
531 user_pmp_enabled: Cell::new(false),
532 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
533 _pd: PhantomData,
534 })
535 }
536
537 pub unsafe fn new_test_rom(
538 flash: FlashRegion,
539 ram: RAMRegion,
540 mmio: MMIORegion,
541 kernel_text: KernelTextRegion,
542 ) -> Result<Self, EarlGreyEPMPError> {
543 use kernel::utilities::registers::interfaces::{Readable, Writeable};
544
545 if HANDOVER_CONFIG_CHECK {
546 Self::check_initial_hardware_config()?;
547 } else {
548 // We aren't supposed to run a handover configuration check. This is
549 // useful for environments which don't replicate the OpenTitan
550 // EarlGrey chip behavior entirely accurately, such as
551 // QEMU. However, in those environments, we cannot guarantee that
552 // this configuration is actually going to work, and not break the
553 // system in the meantime.
554 //
555 // We perform a best-effort configuration, starting by setting rule-lock
556 // bypass...
557 csr::CSR.mseccfg.set(0x00000004);
            // ...adding our required kernel-mode memory access rule...
559 csr::CSR.pmpaddr15.set(0x7FFFFFFF);
560 csr::CSR.pmpcfg3.set(0x9F000000);
561 // ...and enabling the machine-mode whitelist policy:
562 csr::CSR.mseccfg.set(0x00000006);
563 }
564
565 // ---------- HW configured as expected, start setting PMP CSRs
566
567 // The below instructions are an intricate dance to achieve our desired
        // ePMP configuration. For correctness' sake, we do not want to lose
        // access to RAM, FLASH or MMIO at any intermediate point.
570 //
571 // This is challenging, as the last section currently provides us access
572 // to all of these regions, and we can't atomically change both its
573 // pmpaddrX and pmpcfgX CSRs to limit it to a subset of its address
574 // range and permissions. Thus, before changing the `pmpcfg3` /
575 // `pmpaddr15` region, we first utilize another higher-priority CSR to
576 // provide us access to one of the memory regions we'd lose access to,
577 // namely we use the PMP entry 12 to provide us access to MMIO.
578
579 // --> We start with the "high-priority" ("lockdown") section of the
580 // ePMP configuration:
581
582 // Provide R/X access to the kernel .text as passed to us above.
583 // Allocate a TOR region in PMP entries 0 and 1:
584 csr::CSR.pmpaddr0.set(kernel_text.0.pmpaddr_a());
585 csr::CSR.pmpaddr1.set(kernel_text.0.pmpaddr_b());
586
587 // Set the appropriate `pmpcfg0` register value:
588 //
589 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
590 // and to disable regions 2 & 3 (to be compatible with the
591 // non-test-rom constructor).
592 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
593 //
594 // 0x8d = 0b10001101, for kernel .text TOR region
595 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
596 csr::CSR.pmpcfg0.set(0x80808d80);
597
        // --> Continue with the "low-priority" ("accessibility") section of the
599 // ePMP configuration:
600
601 // Now, onto `pmpcfg3`. As discussed above, we want to use a temporary
602 // region to retain MMIO access while reconfiguring the `pmpcfg3` /
603 // `pmpaddr15` register. Thus, write the MMIO region access into
604 // `pmpaddr12`:
605 csr::CSR.pmpaddr12.set(mmio.0.pmpaddr());
606
607 // Configure a Read-Only NAPOT region for the entire flash (spanning
        // kernel & apps, but overlaid by the R/X kernel text TOR section)
609 csr::CSR.pmpaddr13.set(flash.0.pmpaddr());
610
611 // Configure a Read-Write NAPOT region for the entire RAM (spanning
612 // kernel & apps)
613 csr::CSR.pmpaddr14.set(ram.0.pmpaddr());
614
615 // With the FLASH, RAM and MMIO configured in separate regions, we can
616 // activate this new configuration, and further adjust the permissions
617 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
618 // as required for MMIO:
619 //
620 // 0x99 = 0b10011001, for FLASH NAPOT region
621 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
622 //
623 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
624 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
625 csr::CSR.pmpcfg3.set(0x9B9B999B);
626
627 // With the new configuration in place, we can adjust the last region's
628 // address to be limited to the MMIO region, ...
629 csr::CSR.pmpaddr15.set(mmio.0.pmpaddr());
630
631 // ...and then deactivate the `pmpaddr12` fallback MMIO region
632 //
633 // Remove the temporary MMIO region permissions from `pmpaddr12`:
634 //
635 // 0x80 = 0b10000000
636 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
637 //
638 // 0x99 = 0b10011001, for FLASH NAPOT region
639 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
640 //
641 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
642 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
643 csr::CSR.pmpcfg3.set(0x9B9B9980);
644
645 // Ensure that the other pmpcfgX CSRs are cleared:
646 csr::CSR.pmpcfg1.set(0x00000000);
647 csr::CSR.pmpcfg2.set(0x00000000);
648
649 // ---------- PMP machine CSRs configured, lock down the system
650
651 // Finally, unset the rule-lock bypass (RLB) bit. If we don't have a
652 // debug memory region provided, further set machine-mode lockdown (we
653 // can't enable MML and also have a R/W/X region). We also set MMWP for
654 // good measure, but that shouldn't make a difference -- it can't be
655 // cleared anyways as it is a sticky bit.
656 //
657 // Unsetting RLB with at least one locked region will mean that we can't
658 // set it again, thus actually enforcing the region lock bits.
659 //
660 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
661 csr::CSR.mseccfg.set(0x00000003);
662
663 // ---------- System locked down, cross-check config
664
665 // Now, cross-check that the CSRs have the expected values. This acts as
666 // a sanity check, and can also help to protect against some set of
667 // fault-injection attacks. These checks can't be optimized out by the
668 // compiler, as they invoke assembly underneath which is not marked as
669 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
670 if csr::CSR.mseccfg.get() != 0x00000003
            || csr::CSR.pmpcfg0.get() != 0x80808d80
672 || csr::CSR.pmpcfg1.get() != 0x00000000
673 || csr::CSR.pmpcfg2.get() != 0x00000000
674 || csr::CSR.pmpcfg3.get() != 0x9B9B9980
675 || csr::CSR.pmpaddr0.get() != kernel_text.0.pmpaddr_a()
676 || csr::CSR.pmpaddr1.get() != kernel_text.0.pmpaddr_b()
677 || csr::CSR.pmpaddr13.get() != flash.0.pmpaddr()
678 || csr::CSR.pmpaddr14.get() != ram.0.pmpaddr()
679 || csr::CSR.pmpaddr15.get() != mmio.0.pmpaddr()
680 {
681 return Err(EarlGreyEPMPError::SanityCheckFail);
682 }
683
684 // The ePMP hardware was correctly configured, build the ePMP struct:
685 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
686 Ok(EarlGreyEPMP {
687 user_pmp_enabled: Cell::new(false),
688 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
689 _pd: PhantomData,
690 })
691 }
692}
693
694impl<const HANDOVER_CONFIG_CHECK: bool> EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugEnable> {
695 pub unsafe fn new_debug(
696 flash: FlashRegion,
697 ram: RAMRegion,
698 mmio: MMIORegion,
699 kernel_text: KernelTextRegion,
700 debug_memory: RVDMRegion,
701 ) -> Result<Self, EarlGreyEPMPError> {
702 use kernel::utilities::registers::interfaces::{Readable, Writeable};
703
704 if HANDOVER_CONFIG_CHECK {
705 Self::check_initial_hardware_config()?;
706 } else {
707 // We aren't supposed to run a handover configuration check. This is
708 // useful for environments which don't replicate the OpenTitan
709 // EarlGrey chip behavior entirely accurately, such as
710 // QEMU. However, in those environments, we cannot guarantee that
711 // this configuration is actually going to work, and not break the
712 // system in the meantime.
713 //
714 // We perform a best-effort configuration, starting by setting rule-lock
715 // bypass...
716 csr::CSR.mseccfg.set(0x00000004);
            // ...adding our required kernel-mode memory access rule...
718 csr::CSR.pmpaddr15.set(0x7FFFFFFF);
719 csr::CSR.pmpcfg3.set(0x9F000000);
720 // ...and enabling the machine-mode whitelist policy:
721 csr::CSR.mseccfg.set(0x00000006);
722 }
723
724 // ---------- HW configured as expected, start setting PMP CSRs
725
726 // The below instructions are an intricate dance to achieve our desired
        // ePMP configuration. For correctness' sake, we do not want to lose
        // access to RAM, FLASH or MMIO at any intermediate point.
729 //
730 // This is challenging, as the last section currently provides us access
731 // to all of these regions, and we can't atomically change both its
732 // pmpaddrX and pmpcfgX CSRs to limit it to a subset of its address
733 // range and permissions. Thus, before changing the `pmpcfg3` /
734 // `pmpaddr15` region, we first utilize another higher-priority CSR to
735 // provide us access to one of the memory regions we'd lose access to,
736 // namely we use the PMP entry 12 to provide us access to MMIO.
737
738 // Provide R/X access to the kernel .text as passed to us above.
739 // Allocate a TOR region in PMP entries 10 and 11:
740 csr::CSR.pmpaddr10.set(kernel_text.0.pmpaddr_a());
741 csr::CSR.pmpaddr11.set(kernel_text.0.pmpaddr_b());
742
743 // Set the appropriate `pmpcfg2` register value:
744 //
745 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
746 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
747 //
748 // 0x8d = 0b10001101, for kernel .text TOR region
749 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
750 csr::CSR.pmpcfg2.set(0x8d800000);
751
752 // Now, onto `pmpcfg3`. As discussed above, we want to use a temporary
753 // region to retain MMIO access while reconfiguring the `pmpcfg3` /
754 // `pmpaddr15` register. Thus, write the MMIO region access into
755 // `pmpaddr12`:
756 csr::CSR.pmpaddr12.set(mmio.0.pmpaddr());
757
758 // Configure a Read-Only NAPOT region for the entire flash (spanning
        // kernel & apps, but overlaid by the R/X kernel text TOR section)
760 csr::CSR.pmpaddr13.set(flash.0.pmpaddr());
761
762 // Configure a Read-Write NAPOT region for the entire RAM (spanning
763 // kernel & apps)
764 csr::CSR.pmpaddr14.set(ram.0.pmpaddr());
765
766 // With the FLASH, RAM and MMIO configured in separate regions, we can
767 // activate this new configuration, and further adjust the permissions
768 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
769 // as required for MMIO:
770 //
771 // 0x99 = 0b10011001, for FLASH NAPOT region
772 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
773 //
774 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
775 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
776 csr::CSR.pmpcfg3.set(0x9B9B999B);
777
778 // With the new configuration in place, we can adjust the last region's
779 // address to be limited to the MMIO region, ...
780 csr::CSR.pmpaddr15.set(mmio.0.pmpaddr());
781
782 // ...and then repurpose `pmpaddr12` for the debug port:
783 csr::CSR.pmpaddr12.set(debug_memory.0.pmpaddr());
784
785 // 0x9F = 0b10011111, for RVDM R/W/X memory region
786 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 1, W(1) = 1, R(0) = 1
787 //
788 // 0x99 = 0b10011001, for FLASH NAPOT region
789 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
790 //
791 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
792 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
793 csr::CSR.pmpcfg3.set(0x9B9B999F);
794
795 // Ensure that the other pmpcfgX CSRs are cleared:
796 csr::CSR.pmpcfg0.set(0x00000000);
797 csr::CSR.pmpcfg1.set(0x00000000);
798
799 // ---------- PMP machine CSRs configured, lock down the system
800
801 // Finally, unset the rule-lock bypass (RLB) bit. If we don't have a
802 // debug memory region provided, further set machine-mode lockdown (we
803 // can't enable MML and also have a R/W/X region). We also set MMWP for
804 // good measure, but that shouldn't make a difference -- it can't be
805 // cleared anyways as it is a sticky bit.
806 //
807 // Unsetting RLB with at least one locked region will mean that we can't
808 // set it again, thus actually enforcing the region lock bits.
809 //
810 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 0
811 csr::CSR.mseccfg.set(0x00000002);
812
813 // ---------- System locked down, cross-check config
814
815 // Now, cross-check that the CSRs have the expected values. This acts as
816 // a sanity check, and can also help to protect against some set of
817 // fault-injection attacks. These checks can't be optimized out by the
818 // compiler, as they invoke assembly underneath which is not marked as
819 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
820 if csr::CSR.mseccfg.get() != 0x00000002
821 || csr::CSR.pmpcfg0.get() != 0x00000000
822 || csr::CSR.pmpcfg1.get() != 0x00000000
823 || csr::CSR.pmpcfg2.get() != 0x8d800000
824 || csr::CSR.pmpcfg3.get() != 0x9B9B999F
825 || csr::CSR.pmpaddr10.get() != kernel_text.0.pmpaddr_a()
826 || csr::CSR.pmpaddr11.get() != kernel_text.0.pmpaddr_b()
827 || csr::CSR.pmpaddr12.get() != debug_memory.0.pmpaddr()
828 || csr::CSR.pmpaddr13.get() != flash.0.pmpaddr()
829 || csr::CSR.pmpaddr14.get() != ram.0.pmpaddr()
830 || csr::CSR.pmpaddr15.get() != mmio.0.pmpaddr()
831 {
832 return Err(EarlGreyEPMPError::SanityCheckFail);
833 }
834
835 // Now, as we're not in the machine-mode lockdown (MML) mode, locked PMP
836 // regions will still be accessible to userspace. To prevent our
837 // kernel-mode access regions from being accessible to user-mode, we use
838 // the last user-mode TOR region (`pmpaddr9`) to configure a
839 // "protection" region which disallows access to all memory that has not
840 // otherwise been granted access to.
841 csr::CSR.pmpaddr9.set(0x7FFFFFFF); // the entire address space
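        // (With the NAPOT encoding, a pmpaddr value of 0x7FFFFFFF has 31
        // trailing one-bits and thus selects a 2^34-byte naturally aligned
        // region at base 0, covering the entire 32-bit address space.)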
842
843 // And finally apply this configuration to the `pmpcfg2` CSR. For good
844 // measure, we also include the locked regions (which we can no longer
845 // modify thanks to RLB = 0).
846 //
847 // 0x18 = 0b00011000, to revoke user-mode perms to all memory
848 // setting L(7) = 0, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 0
849 //
850 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
851 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
852 //
853 // 0x8d = 0b10001101, for kernel .text TOR region
854 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
        csr::CSR.pmpcfg2.set(0x8d801800);
856
857 // The ePMP hardware was correctly configured, build the ePMP struct:
858 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
859 let epmp = EarlGreyEPMP {
860 user_pmp_enabled: Cell::new(false),
861 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
862 _pd: PhantomData,
863 };
864
865 Ok(epmp)
866 }
867}
868
869impl<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig>
870 EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, DBG>
871{
872 fn check_initial_hardware_config() -> Result<(), EarlGreyEPMPError> {
873 use kernel::utilities::registers::interfaces::Readable;
874
875 // This initialization code is written to work with 16 PMP entries. Add
876 // an explicit assertion such that things break when the constant above
877 // is changed:
878 #[allow(clippy::assertions_on_constants)]
879 const _: () = assert!(
880 PMP_ENTRIES_OVER_TWO == 8,
881 "EarlGrey ePMP initialization is written for 16 PMP entries.",
882 );
883
884 // ---------- Check current HW config
885
886 // Ensure that the `mseccfg` CSR has the expected value, namely that
887 // we're in "machine-mode whitelist policy" and have "rule-lock bypass"
888 // enabled. If this register has an unexpected value, we risk
889 // accidentally revoking important permissions for the Tock kernel
890 // itself.
891 if csr::CSR.mseccfg.get() != 0x00000006 {
892 return Err(EarlGreyEPMPError::InvalidInitialMseccfgValue);
893 }
894
895 // We assume the very last PMP region is set to provide us RXW access to
896 // the entirety of memory, and all other regions are disabled. Check the
897 // CSRs to make sure that this is indeed the case.
898 for i in 0..(PMP_ENTRIES_OVER_TWO / 2 - 1) {
899 // 0x98 = 0b10011000, extracting L(7) and A(4-3) bits.
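            // Replicated across all four per-entry octets, the 0x98989898 mask
            // thus checks that every entry of this pmpcfgX register is
            // non-locked and disabled (A = OFF).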
900 if csr::CSR.pmpconfig_get(i) & 0x98989898 != 0x00000000 {
901 return Err(EarlGreyEPMPError::InvalidInitialPmpcfgValue(i));
902 }
903 }
904
905 // The last CSR is special, as we expect it to contain the NAPOT region
906 // which currently gives us memory access.
907 //
908 // 0x98 = 0b10011000, extracting L(7) and A(4-3) bits.
        // 0x9F = 0b10011111, extracting L(7), A(4-3), X(2), W(1), R(0) bits.
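        // In practice this matches, e.g., pmpcfg3 == 0x9F000000 together with
        // pmpaddr15 == 0x7FFFFFFF, which is also what the non-checking paths in
        // the constructors above write.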
910 if csr::CSR.pmpconfig_get(PMP_ENTRIES_OVER_TWO / 2 - 1) & 0x9F989898 != 0x9F000000 {
911 return Err(EarlGreyEPMPError::InvalidInitialPmpcfgValue(
912 PMP_ENTRIES_OVER_TWO / 2 - 1,
913 ));
914 }
915
916 Ok(())
917 }
918
919 // ---------- Backing functions for the TORUserPMP implementations ---------
920 //
921 // The EarlGrey ePMP implementations of `TORUserPMP` differ between
922 // `EPMPDebugEnable` and `EPMPDebugDisable` configurations. These backing
923 // functions here are applicable to both, and called by those trait
924 // implementations respectively:
925
926 fn user_available_regions<const TOR_USER_REGIONS: usize>(&self) -> usize {
927 // Always assume to have `TOR_USER_REGIONS` usable TOR regions. We have a
928 // fixed number of kernel memory protection regions, and a fixed mapping
929 // of user regions to hardware PMP entries.
930 TOR_USER_REGIONS
931 }
932
933 fn user_configure_pmp<const TOR_USER_REGIONS: usize>(
934 &self,
935 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS],
936 ) -> Result<(), ()> {
937 // Configure all of the regions' addresses and store their pmpcfg octets
938 // in our shadow storage. If the user PMP is already enabled, we further
939 // apply this configuration (set the pmpcfgX CSRs) by running
940 // `enable_user_pmp`:
941 for (i, (region, shadow_user_pmpcfg)) in regions
942 .iter()
943 .zip(self.shadow_user_pmpcfgs.iter())
944 .enumerate()
945 {
946 // The ePMP in MML mode does not support read-write-execute
947 // regions. If such a region is to be configured, abort. As this
948 // loop here only modifies the shadow state, we can simply abort and
949 // return an error. We don't make any promises about the ePMP state
            // if the configuration fails, but it is still being activated with
951 // `enable_user_pmp`:
952 if region.0.get()
953 == <TORUserPMPCFG as From<mpu::Permissions>>::from(
954 mpu::Permissions::ReadWriteExecute,
955 )
956 .get()
957 {
958 return Err(());
959 }
960
            // Set the CSR addresses for this region (if it's not OFF, in which
962 // case the hardware-configured addresses are irrelevant):
963 if region.0 != TORUserPMPCFG::OFF {
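                // (TOR pmpaddr values hold the region bounds right-shifted by
                // two, i.e. in units of 4 bytes.)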
964 csr::CSR.pmpaddr_set(
965 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 0,
966 (region.1 as usize).overflowing_shr(2).0,
967 );
968 csr::CSR.pmpaddr_set(
969 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1,
970 (region.2 as usize).overflowing_shr(2).0,
971 );
972 }
973
974 // Store the region's pmpcfg octet:
975 shadow_user_pmpcfg.set(region.0);
976 }
977
978 // If the PMP is currently active, apply the changes to the CSRs:
979 if self.user_pmp_enabled.get() {
980 self.user_enable_user_pmp()?;
981 }
982
983 Ok(())
984 }
985
986 fn user_enable_user_pmp(&self) -> Result<(), ()> {
987 // Currently, this code requires the TOR regions to start at an even PMP
988 // region index. Assert that this is indeed the case:
989 #[allow(clippy::let_unit_value)]
990 let _: () = assert!(DBG::TOR_USER_ENTRIES_OFFSET % 2 == 0);
991
992 // We store the "enabled" PMPCFG octets of user regions in the
993 // `shadow_user_pmpcfg` field, such that we can re-enable the PMP
994 // without a call to `configure_pmp` (where the `TORUserPMPCFG`s are
995 // provided by the caller).
996
997 // Could use `iter_array_chunks` once that's stable.
998 //
        // Limit iteration to `DBG::TOR_USER_REGIONS` to avoid overwriting any
        // entries configured beyond the user-mode regions (e.g. the "deny-all"
        // rule that follows them in the debug configuration).
1001 let mut shadow_user_pmpcfgs_iter = self.shadow_user_pmpcfgs[..DBG::TOR_USER_REGIONS].iter();
1002 let mut i = DBG::TOR_USER_ENTRIES_OFFSET / 2;
1003
1004 while let Some(first_region_pmpcfg) = shadow_user_pmpcfgs_iter.next() {
1005 // If we're at a "region" offset divisible by two (where "region" =
1006 // 2 PMP "entries"), then we can configure an entire `pmpcfgX` CSR
1007 // in one operation. As CSR writes are expensive, this is an
            // optimization worth making:
1009 let second_region_opt = if i % 2 == 0 {
1010 shadow_user_pmpcfgs_iter.next()
1011 } else {
1012 None
1013 };
1014
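            // As a concrete illustration (with the debug port disabled, so
            // DBG::TOR_USER_ENTRIES_OFFSET == 4): on the first iteration i == 2
            // and both regions' octets are packed into a single write of
            // pmpcfg1. `u32::from_be_bytes` maps the first array element to the
            // most significant octet, so the resulting layout (MSB to LSB) is
            // [entry 7 | entry 6 (OFF) | entry 5 | entry 4 (OFF)], i.e. the TOR
            // configuration octets of user regions #1 and #0 with their "start"
            // entries forced to OFF.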
1015 if let Some(second_region_pmpcfg) = second_region_opt {
1016 // We're at an even index and have two regions to configure, so
1017 // do that with a single CSR write:
1018 csr::CSR.pmpconfig_set(
1019 i / 2,
1020 u32::from_be_bytes([
1021 second_region_pmpcfg.get().get(),
1022 TORUserPMPCFG::OFF.get(),
1023 first_region_pmpcfg.get().get(),
1024 TORUserPMPCFG::OFF.get(),
1025 ]) as usize,
1026 );
1027
1028 i += 2;
1029 } else if i % 2 == 0 {
1030 // This is a single region at an even index. Thus, modify the
1031 // first two pmpcfgX octets for this region.
1032 csr::CSR.pmpconfig_modify(
1033 i / 2,
1034 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1035 0x0000FFFF,
1036 0, // lower two octets
1037 u32::from_be_bytes([
1038 0,
1039 0,
1040 first_region_pmpcfg.get().get(),
1041 TORUserPMPCFG::OFF.get(),
1042 ]) as usize,
1043 ),
1044 );
1045
1046 i += 1;
1047 } else {
1048 // This is a single region at an odd index. Thus, modify the
1049 // latter two pmpcfgX octets for this region.
1050 csr::CSR.pmpconfig_modify(
1051 i / 2,
1052 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1053 0x0000FFFF,
1054 16, // higher two octets
1055 u32::from_be_bytes([
1056 0,
1057 0,
1058 first_region_pmpcfg.get().get(),
1059 TORUserPMPCFG::OFF.get(),
1060 ]) as usize,
1061 ),
1062 );
1063
1064 i += 1;
1065 }
1066 }
1067
1068 self.user_pmp_enabled.set(true);
1069
1070 Ok(())
1071 }
1072
1073 fn user_disable_user_pmp(&self) {
1074 // Simply set all of the user-region pmpcfg octets to OFF:
1075 let mut user_region_pmpcfg_octet_pairs = (DBG::TOR_USER_ENTRIES_OFFSET / 2)
1076 ..((DBG::TOR_USER_ENTRIES_OFFSET / 2) + DBG::TOR_USER_REGIONS);
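        // (With the debug port disabled, `DBG::TOR_USER_ENTRIES_OFFSET / 2` is
        // 2, so this iterates over octet-pair indices 2..6, covering PMP
        // entries 4 through 11.)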
1077
1078 while let Some(first_region_idx) = user_region_pmpcfg_octet_pairs.next() {
1079 let second_region_opt = if first_region_idx % 2 == 0 {
1080 user_region_pmpcfg_octet_pairs.next()
1081 } else {
1082 None
1083 };
1084
1085 if let Some(_second_region_idx) = second_region_opt {
1086 // We're at an even index and have two regions to configure, so
1087 // do that with a single CSR write:
1088 csr::CSR.pmpconfig_set(
1089 first_region_idx / 2,
1090 u32::from_be_bytes([
1091 TORUserPMPCFG::OFF.get(),
1092 TORUserPMPCFG::OFF.get(),
1093 TORUserPMPCFG::OFF.get(),
1094 TORUserPMPCFG::OFF.get(),
1095 ]) as usize,
1096 );
1097 } else if first_region_idx % 2 == 0 {
1098 // This is a single region at an even index. Thus, modify the
1099 // first two pmpcfgX octets for this region.
1100 csr::CSR.pmpconfig_modify(
1101 first_region_idx / 2,
1102 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1103 0x0000FFFF,
1104 0, // lower two octets
1105 u32::from_be_bytes([
1106 0,
1107 0,
1108 TORUserPMPCFG::OFF.get(),
1109 TORUserPMPCFG::OFF.get(),
1110 ]) as usize,
1111 ),
1112 );
1113 } else {
1114 // This is a single region at an odd index. Thus, modify the
1115 // latter two pmpcfgX octets for this region.
1116 csr::CSR.pmpconfig_modify(
1117 first_region_idx / 2,
1118 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1119 0x0000FFFF,
1120 16, // higher two octets
1121 u32::from_be_bytes([
1122 0,
1123 0,
1124 TORUserPMPCFG::OFF.get(),
1125 TORUserPMPCFG::OFF.get(),
1126 ]) as usize,
1127 ),
1128 );
1129 }
1130 }
1131
1132 self.user_pmp_enabled.set(false);
1133 }
1134}
1135
1136impl<const HANDOVER_CONFIG_CHECK: bool> TORUserPMP<{ TOR_USER_REGIONS_DEBUG_ENABLE }>
1137 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugEnable>
1138{
1139 // Don't require any const-assertions in the EarlGreyEPMP.
1140 const CONST_ASSERT_CHECK: () = ();
1141
1142 fn available_regions(&self) -> usize {
1143 self.user_available_regions::<TOR_USER_REGIONS_DEBUG_ENABLE>()
1144 }
1145
1146 fn configure_pmp(
1147 &self,
1148 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS_DEBUG_ENABLE],
1149 ) -> Result<(), ()> {
1150 self.user_configure_pmp::<TOR_USER_REGIONS_DEBUG_ENABLE>(regions)
1151 }
1152
1153 fn enable_user_pmp(&self) -> Result<(), ()> {
1154 self.user_enable_user_pmp()
1155 }
1156
1157 fn disable_user_pmp(&self) {
        // Technically, `disable_user_pmp` could be implemented as a no-op in
        // the debug-mode ePMP, as machine-mode lockdown (MML) is not enabled.
        // However, we still exercise these routines to stay as close to the
1161 // non-debug ePMP configuration as possible:
1162 self.user_disable_user_pmp()
1163 }
1164}
1165
1166impl<const HANDOVER_CONFIG_CHECK: bool> TORUserPMP<{ TOR_USER_REGIONS_DEBUG_DISABLE }>
1167 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugDisable>
1168{
1169 // Don't require any const-assertions in the EarlGreyEPMP.
1170 const CONST_ASSERT_CHECK: () = ();
1171
1172 fn available_regions(&self) -> usize {
1173 self.user_available_regions::<TOR_USER_REGIONS_DEBUG_DISABLE>()
1174 }
1175
1176 fn configure_pmp(
1177 &self,
1178 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS_DEBUG_DISABLE],
1179 ) -> Result<(), ()> {
1180 self.user_configure_pmp::<TOR_USER_REGIONS_DEBUG_DISABLE>(regions)
1181 }
1182
1183 fn enable_user_pmp(&self) -> Result<(), ()> {
1184 self.user_enable_user_pmp()
1185 }
1186
1187 fn disable_user_pmp(&self) {
1188 self.user_disable_user_pmp()
1189 }
1190}
1191
1192impl<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig> fmt::Display
1193 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, DBG>
1194{
1195 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1196 use kernel::utilities::registers::interfaces::Readable;
1197
1198 write!(f, " EarlGrey ePMP configuration:\r\n")?;
1199 write!(
1200 f,
1201 " mseccfg: {:#08X}, user-mode PMP active: {:?}\r\n",
1202 csr::CSR.mseccfg.get(),
1203 self.user_pmp_enabled.get()
1204 )?;
1205 unsafe { format_pmp_entries::<PMP_ENTRIES>(f) }?;
1206
1207 write!(f, " Shadow PMP entries for user-mode:\r\n")?;
1208 for (i, shadowed_pmpcfg) in self.shadow_user_pmpcfgs[..DBG::TOR_USER_REGIONS]
1209 .iter()
1210 .enumerate()
1211 {
1212 let (start_pmpaddr_label, startaddr_pmpaddr, endaddr, mode) =
1213 if shadowed_pmpcfg.get() == TORUserPMPCFG::OFF {
1214 (
1215 "pmpaddr",
1216 csr::CSR.pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2)),
1217 0,
1218 "OFF",
1219 )
1220 } else {
1221 (
1222 " start",
1223 csr::CSR
1224 .pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2))
1225 .overflowing_shl(2)
1226 .0,
1227 csr::CSR
1228 .pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1)
1229 .overflowing_shl(2)
1230 .0
1231 | 0b11,
1232 "TOR",
1233 )
1234 };
1235
1236 write!(
1237 f,
1238 " [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}) ({}{}{}{})\r\n",
1239 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1,
1240 start_pmpaddr_label,
1241 startaddr_pmpaddr,
1242 endaddr,
1243 shadowed_pmpcfg.get().get(),
1244 mode,
1245 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::l) {
1246 "l"
1247 } else {
1248 "-"
1249 },
1250 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::r) {
1251 "r"
1252 } else {
1253 "-"
1254 },
1255 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::w) {
1256 "w"
1257 } else {
1258 "-"
1259 },
1260 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::x) {
1261 "x"
1262 } else {
1263 "-"
1264 },
1265 )?;
1266 }
1267
1268 Ok(())
1269 }
1270}