riscv/pmp.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

use core::cell::Cell;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::{cmp, fmt};

use kernel::platform::mpu;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::registers::{register_bitfields, LocalRegisterCopy};

use crate::csr;

register_bitfields![u8,
    /// Generic `pmpcfg` octet.
    ///
    /// A PMP entry is configured through `pmpaddrX` and `pmpcfgX` CSRs, where a
    /// single `pmpcfgX` CSR holds multiple octets, each affecting the access
    /// permission, addressing mode and "lock" attributes of a single `pmpaddrX`
    /// CSR. This bitfield definition represents a single, `u8`-backed `pmpcfg`
    /// octet affecting a single `pmpaddr` entry.
    pub pmpcfg_octet [
        r OFFSET(0) NUMBITS(1) [],
        w OFFSET(1) NUMBITS(1) [],
        x OFFSET(2) NUMBITS(1) [],
        a OFFSET(3) NUMBITS(2) [
            OFF = 0,
            TOR = 1,
            NA4 = 2,
            NAPOT = 3
        ],
        l OFFSET(7) NUMBITS(1) []
    ]
];

/// Mask for valid values of the `pmpaddrX` CSRs on RISC-V platforms.
///
/// RV64 platforms support only a 56 bit physical address space. For this reason
/// (and because addresses in `pmpaddrX` CSRs are left-shifted by 2 bits) the
/// uppermost 10 bits of a `pmpaddrX` CSR are defined as WARL-0. ANDing with
/// this mask achieves the same effect; it can thus be used to determine, before
/// writing a value to a `pmpaddrX` CSR, whether a given PMP region spec would
/// be legal and applied as written. For RV32 platforms, the whole 32 bit
/// address range is valid.
///
/// This mask will have the value `0x003F_FFFF_FFFF_FFFF` on RV64 platforms, and
/// `0xFFFFFFFF` on RV32 platforms.
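///
/// For illustration, a hypothetical validity check using this mask (a minimal
/// sketch, not part of this module's API):
///
/// ```ignore
/// fn pmpaddr_is_valid(pmpaddr: usize) -> bool {
///     // All bits outside of the mask must be zero for the value to be
///     // representable in (and not truncated by) a `pmpaddrX` CSR:
///     pmpaddr & !PMPADDR_MASK == 0
/// }
/// ```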
const PMPADDR_MASK: usize = (0x003F_FFFF_FFFF_FFFFu64 & usize::MAX as u64) as usize;

/// A `pmpcfg` octet for a user-mode (non-locked) TOR-addressed PMP region.
///
/// This is a wrapper around a [`pmpcfg_octet`] (`u8`) register type, which
/// guarantees that the wrapped `pmpcfg` octet is always set to be either
/// [`TORUserPMPCFG::OFF`] (set to `0x00`), or in a non-locked, TOR-addressed
/// configuration.
///
/// By accepting this type, PMP implementations can rely on the above properties
/// to hold by construction and avoid runtime checks. For example, this type is
/// used in the [`TORUserPMP::configure_pmp`] method.
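///
/// A `TORUserPMPCFG` is obtained either as [`TORUserPMPCFG::OFF`] or through
/// the `From<mpu::Permissions>` conversion below, for example (an illustrative
/// sketch):
///
/// ```ignore
/// use kernel::platform::mpu;
///
/// let cfg: TORUserPMPCFG = mpu::Permissions::ReadOnly.into();
/// assert!(cfg != TORUserPMPCFG::OFF);
/// ```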
#[derive(Copy, Clone, Debug)]
pub struct TORUserPMPCFG(LocalRegisterCopy<u8, pmpcfg_octet::Register>);

impl TORUserPMPCFG {
    pub const OFF: TORUserPMPCFG = TORUserPMPCFG(LocalRegisterCopy::new(0));

    /// Extract the `u8` representation of the [`pmpcfg_octet`] register.
    pub fn get(&self) -> u8 {
        self.0.get()
    }

    /// Extract a copy of the contained [`pmpcfg_octet`] register.
    pub fn get_reg(&self) -> LocalRegisterCopy<u8, pmpcfg_octet::Register> {
        self.0
    }
}

impl PartialEq<TORUserPMPCFG> for TORUserPMPCFG {
    fn eq(&self, other: &Self) -> bool {
        self.0.get() == other.0.get()
    }
}

impl Eq for TORUserPMPCFG {}

impl From<mpu::Permissions> for TORUserPMPCFG {
    fn from(p: mpu::Permissions) -> Self {
        let fv = match p {
            mpu::Permissions::ReadWriteExecute => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::SET
            }
            mpu::Permissions::ReadWriteOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::CLEAR
            }
            mpu::Permissions::ReadExecuteOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
            }
            mpu::Permissions::ReadOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::CLEAR
            }
            mpu::Permissions::ExecuteOnly => {
                pmpcfg_octet::r::CLEAR + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
            }
        };

        TORUserPMPCFG(LocalRegisterCopy::new(
            (fv + pmpcfg_octet::l::CLEAR + pmpcfg_octet::a::TOR).value,
        ))
    }
}

/// A RISC-V PMP memory region specification, configured in NAPOT mode.
///
/// This type checks that the supplied `start` and `size` values meet the RISC-V
/// NAPOT requirements, namely that
///
/// - the region is a power of two bytes in size
/// - the region's start address is aligned to the region size
/// - the region is at least 8 bytes long
///
/// Finally, RISC-V restricts physical address spaces to 34 bit on RV32, and 56
/// bit on RV64 platforms. A `NAPOTRegionSpec` must not cover addresses
/// exceeding this address space, respectively. In practice, this means that on
/// RV64 platforms `NAPOTRegionSpec`s whose encoded `pmpaddrX` CSR contains any
/// non-zero bits in the 10 most significant bits will be rejected.
///
/// By accepting this type, PMP implementations can rely on these requirements
/// to be verified. Furthermore, they can use the [`NAPOTRegionSpec::pmpaddr`]
/// convenience method to retrieve a `pmpaddrX` CSR value encoding this
/// region's address and length.
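///
/// For example (an illustrative sketch of valid and invalid specs):
///
/// ```ignore
/// // 256 bytes at a 256-byte aligned address: valid.
/// assert!(NAPOTRegionSpec::from_start_size(0x8000_0100 as *const u8, 0x100).is_some());
/// // 12 bytes is not a power of two: rejected.
/// assert!(NAPOTRegionSpec::from_start_size(0x8000_0100 as *const u8, 12).is_none());
/// // Start address not aligned to the region size: rejected.
/// assert!(NAPOTRegionSpec::from_start_size(0x8000_0004 as *const u8, 0x100).is_none());
/// ```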
#[derive(Copy, Clone, Debug)]
pub struct NAPOTRegionSpec {
    pmpaddr: usize,
}

impl NAPOTRegionSpec {
    /// Construct a new [`NAPOTRegionSpec`] from a pmpaddr CSR value.
    ///
    /// For an RV32 platform, every single integer in `[0; usize::MAX]` is a
    /// valid `pmpaddrX` CSR for a region configured in NAPOT mode, and this
    /// operation is thus effectively infallible.
    ///
    /// For RV64 platforms, this operation checks if the range would include any
    /// address outside of the 56 bit physical address space and, in this case,
    /// rejects the `pmpaddr` (tests whether any of the 10 most significant bits
    /// are non-zero).
    pub fn from_pmpaddr_csr(pmpaddr: usize) -> Option<Self> {
        // On 64-bit platforms, the 10 most significant bits must be 0.
        // Prevent the `&-masking with zero` lint error in case of RV32.
        // The redundant checks in this case are optimized out by the compiler
        // at any of the 1, 2, 3 or z opt-levels.
        #[allow(clippy::bad_bit_mask)]
        (pmpaddr & !PMPADDR_MASK == 0).then_some(NAPOTRegionSpec { pmpaddr })
    }

    /// Construct a new [`NAPOTRegionSpec`] from a start address and size.
    ///
    /// This method accepts a `start` address and a region length. It returns
    /// `Some(region)` when all constraints specified in the
    /// [`NAPOTRegionSpec`]'s documentation are satisfied, otherwise `None`.
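    ///
    /// The NAPOT encoding stores the region in a single `pmpaddrX` value: the
    /// address is right-shifted by 2, and the size is encoded in the number of
    /// trailing one-bits. As a worked example (illustrative), a 16-byte region
    /// at `0x8000_0000` yields `(0x8000_0000 + (16 - 1) / 2) >> 2 =
    /// 0x2000_0001`, i.e., the shifted base address with one trailing one-bit.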
    pub fn from_start_size(start: *const u8, size: usize) -> Option<Self> {
        if !size.is_power_of_two() || start.addr() % size != 0 || size < 8 {
            return None;
        }

        Self::from_pmpaddr_csr(
            (start.addr() + (size - 1).overflowing_shr(1).0)
                .overflowing_shr(2)
                .0,
        )
    }

    /// Construct a new [`NAPOTRegionSpec`] from a start address and end address.
    ///
    /// This method accepts a `start` address (inclusive) and `end` address
    /// (exclusive). It returns `Some(region)` when all constraints specified in
    /// the [`NAPOTRegionSpec`]'s documentation are satisfied, otherwise `None`.
    pub fn from_start_end(start: *const u8, end: *const u8) -> Option<Self> {
        end.addr()
            .checked_sub(start.addr())
            .and_then(|size| Self::from_start_size(start, size))
    }

    /// Retrieve a `pmpaddrX`-CSR compatible representation of this
    /// [`NAPOTRegionSpec`]'s address and length. For this value to describe a
    /// valid NAPOT region when written to a `pmpaddrX` CSR, the `A` (address
    /// mode) field of the `pmpcfgX` octet belonging to this `pmpaddrX` CSR must
    /// be set to `NAPOT` (0b11).
    pub fn pmpaddr(&self) -> usize {
        self.pmpaddr
    }

    /// Return the range of physical addresses covered by this PMP region.
    ///
    /// This follows the regular Rust range semantics (start inclusive, end
    /// exclusive). It returns the addresses as u64-integers to ensure that all
    /// underlying pmpaddrX CSR values can be represented.
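    ///
    /// For example (illustrative): `pmpaddr = 0b0001` has one trailing
    /// one-bit, so the region size is `0b1000 << 1 = 16` bytes, and the base
    /// address is `(0b0001 & !0b0001) << 2 = 0`, yielding the range `0..16`.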
    pub fn address_range(&self) -> core::ops::Range<u64> {
        let trailing_ones: u64 = self.pmpaddr.trailing_ones() as u64;
        let size = 0b1000_u64 << trailing_ones;
        let base_addr: u64 =
            (self.pmpaddr as u64 & !((1_u64 << trailing_ones).saturating_sub(1))) << 2;
        base_addr..(base_addr.saturating_add(size))
    }
}

/// A RISC-V PMP memory region specification, configured in TOR mode.
///
/// This type checks that the supplied `start` and `end` addresses meet the
/// RISC-V TOR requirements, namely that
///
/// - the region's start address is aligned to a 4-byte boundary
/// - the region's end address is aligned to a 4-byte boundary
/// - the region is at least 4 bytes long
///
/// Finally, RISC-V restricts physical address spaces to 34 bit on RV32, and 56
/// bit on RV64 platforms. A `TORRegionSpec` must not cover addresses exceeding
/// this address space, respectively. In practice, this means that on RV64
/// platforms `TORRegionSpec`s whose encoded `pmpaddrX` CSR contains any
/// non-zero bits in the 10 most significant bits will be rejected. In
/// particular, with the `end` pmpaddrX CSR / address being exclusive, the
/// region cannot span the last 4 bytes of the 56-bit address space on RV64, or
/// the last 4 bytes of the 34-bit address space on RV32.
///
/// By accepting this type, PMP implementations can rely on these requirements
/// to be verified.
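///
/// For example (an illustrative sketch):
///
/// ```ignore
/// // A 4-byte aligned region of at least 4 bytes: valid.
/// assert!(TORRegionSpec::from_start_end(0x1000 as *const u8, 0x2000 as *const u8).is_some());
/// // An unaligned end address: rejected.
/// assert!(TORRegionSpec::from_start_end(0x1000 as *const u8, 0x2001 as *const u8).is_none());
/// ```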
#[derive(Copy, Clone, Debug)]
pub struct TORRegionSpec {
    pmpaddr_a: usize,
    pmpaddr_b: usize,
}

impl TORRegionSpec {
    /// Construct a new [`TORRegionSpec`] from a pair of pmpaddrX CSR values.
    ///
    /// This method accepts two `pmpaddrX` CSR values that together are
    /// configured to describe a single TOR memory region. The second value,
    /// `pmpaddr_b`, must be strictly greater than `pmpaddr_a`, which translates
    /// into a minimum region size of 4 bytes. Otherwise this function returns
    /// `None`.
    ///
    /// For RV64 platforms, this operation also checks if the range would
    /// include any address outside of the 56 bit physical address space and, in
    /// this case, returns `None` (tests whether any of the 10 most significant
    /// bits of either `pmpaddr` are non-zero).
    pub fn from_pmpaddr_csrs(pmpaddr_a: usize, pmpaddr_b: usize) -> Option<TORRegionSpec> {
        // Prevent the `&-masking with zero` lint error in case of RV32.
        // The redundant checks in this case are optimized out by the compiler
        // at any of the 1, 2, 3 or z opt-levels.
        #[allow(clippy::bad_bit_mask)]
        ((pmpaddr_a < pmpaddr_b)
            && (pmpaddr_a & !PMPADDR_MASK == 0)
            && (pmpaddr_b & !PMPADDR_MASK == 0))
            .then_some(TORRegionSpec {
                pmpaddr_a,
                pmpaddr_b,
            })
    }

    /// Construct a new [`TORRegionSpec`] from a range of addresses.
    ///
    /// This method accepts a `start` and `end` address. It returns
    /// `Some(region)` when all constraints specified in the [`TORRegionSpec`]'s
    /// documentation are satisfied, otherwise `None`.
    pub fn from_start_end(start: *const u8, end: *const u8) -> Option<Self> {
        if (start as usize) % 4 != 0
            || (end as usize) % 4 != 0
            || (end as usize)
                .checked_sub(start as usize)
                .is_none_or(|size| size < 4)
        {
            return None;
        }

        Self::from_pmpaddr_csrs(start.addr() >> 2, end.addr() >> 2)
    }

    /// Get the first `pmpaddrX` CSR value that this TORRegionSpec encodes.
    pub fn pmpaddr_a(&self) -> usize {
        self.pmpaddr_a
    }

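    /// Get the second `pmpaddrX` CSR value that this TORRegionSpec encodes.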
    pub fn pmpaddr_b(&self) -> usize {
        self.pmpaddr_b
    }
}

/// Helper method to check if a [`PMPUserMPUConfig`] region overlaps with a
/// region specified by `other_start` and `other_size`.
///
/// Matching the RISC-V spec, this checks `pmpaddr[i-1] <= y < pmpaddr[i]` for
/// TOR ranges.
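///
/// For example (an illustrative sketch): a region covering `0x1000..0x2000`
/// overlaps with a 4-byte region starting at `0x1FFC`, but not with one
/// starting at `0x2000`:
///
/// ```ignore
/// let region = (
///     TORUserPMPCFG::from(mpu::Permissions::ReadOnly),
///     0x1000 as *const u8,
///     0x2000 as *const u8,
/// );
/// assert!(region_overlaps(&region, 0x1ffc as *const u8, 4));
/// assert!(!region_overlaps(&region, 0x2000 as *const u8, 4));
/// ```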
fn region_overlaps(
    region: &(TORUserPMPCFG, *const u8, *const u8),
    other_start: *const u8,
    other_size: usize,
) -> bool {
    // PMP TOR regions are not inclusive on the high end, that is
    //     pmpaddr[i-1] <= y < pmpaddr[i].
    //
    // This happens to coincide with the definition of the Rust half-open Range
    // type, which provides a convenient `.contains()` method:
    let region_range = Range {
        start: region.1 as usize,
        end: region.2 as usize,
    };

    let other_range = Range {
        start: other_start as usize,
        end: other_start as usize + other_size,
    };

    // For a range A to overlap with a range B, either B's first or B's last
    // element must be contained in A, or A's first or A's last element must be
    // contained in B. As we deal with half-open ranges, ensure that neither
    // range is empty.
    //
    // This implementation is simple and stupid, and can be optimized. We leave
    // that as an exercise to the compiler.
    !region_range.is_empty()
        && !other_range.is_empty()
        && (region_range.contains(&other_range.start)
            || region_range.contains(&(other_range.end.saturating_sub(1)))
            || other_range.contains(&region_range.start)
            || other_range.contains(&(region_range.end.saturating_sub(1))))
}

#[cfg(test)]
pub mod misc_pmp_test {
    #[test]
    fn test_napot_region_spec_from_pmpaddr_csr() {
        use super::NAPOTRegionSpec;

        // Unfortunately, we can't run these unit tests for different platforms
        // with arbitrary bit-widths (at least when using `usize` in the
        // `NAPOTRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let pmpaddr_max: usize = if core::mem::size_of::<usize>() == 8 {
            // This deliberately does not re-use the `PMPADDR_MASK`
            // constant, which should be equal to this value:
            0x003F_FFFF_FFFF_FFFF_u64.try_into().unwrap()
        } else {
            usize::MAX
        };

        for (valid, pmpaddr, start, end) in [
            // Basic sanity checks:
            (true, 0b0000, 0b0000_0000, 0b0000_1000),
            (true, 0b0001, 0b0000_0000, 0b0001_0000),
            (true, 0b0010, 0b0000_1000, 0b0001_0000),
            (true, 0b0011, 0b0000_0000, 0b0010_0000),
            (true, 0b0101, 0b0001_0000, 0b0010_0000),
            (true, 0b1011, 0b0010_0000, 0b0100_0000),
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64, a 2^{XLEN + 3} byte NAPOT range).
            (
                true,
                pmpaddr_max,
                0,
                if core::mem::size_of::<usize>() == 8 {
                    0x0200_0000_0000_0000
                } else {
                    0x0000_0008_0000_0000
                },
            ),
            // Cannot create region larger than `pmpaddr_max`:
            (
                core::mem::size_of::<usize>() != 8,
                pmpaddr_max.saturating_add(1),
                0,
                if core::mem::size_of::<usize>() == 8 {
                    // Doesn't matter, operation should fail:
                    0
                } else {
                    0x0000_0008_0000_0000
                },
            ),
        ] {
            match (valid, NAPOTRegionSpec::from_pmpaddr_csr(pmpaddr)) {
                (true, Some(region)) => {
                    assert_eq!(
                        region.pmpaddr(),
                        pmpaddr,
                        "NAPOTRegionSpec::from_pmpaddr_csr yields wrong CSR value (0x{:x?} vs. 0x{:x?})",
                        pmpaddr,
                        region.pmpaddr()
                    );
                    assert_eq!(
                        region.address_range(),
                        start..end,
                        "NAPOTRegionSpec::from_pmpaddr_csr yields wrong address range value for CSR 0x{:x?} (0x{:x?}..0x{:x?} vs. 0x{:x?}..0x{:x?})",
                        pmpaddr,
                        region.address_range().start,
                        region.address_range().end,
                        start,
                        end
                    );
                }

                (true, None) => {
                    panic!(
                        "Failed to create NAPOT region over pmpaddr CSR ({:x?}), but it has to succeed!",
                        pmpaddr,
                    );
                }

                (false, Some(region)) => {
                    panic!(
                        "Creation of NAPOT region over pmpaddr CSR {:x?} must fail, but succeeded: {:?}",
                        pmpaddr, region,
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }

    #[test]
    fn test_tor_region_spec_from_pmpaddr_csrs() {
        use super::TORRegionSpec;
        // Unfortunately, we can't run these unit tests for different platforms
        // with arbitrary bit-widths (at least when using `usize` in the
        // `TORRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let pmpaddr_max: usize = if core::mem::size_of::<usize>() == 8 {
            // This deliberately does not re-use the `PMPADDR_MASK`
            // constant, which should be equal to this value:
            0x003F_FFFF_FFFF_FFFF_u64.try_into().unwrap()
        } else {
            usize::MAX
        };

        for (valid, pmpaddr_a, pmpaddr_b) in [
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64):
            (true, 0, 1),
            (true, 0x8badf00d, 0xdeadbeef),
            (true, pmpaddr_max - 1, pmpaddr_max),
            (true, 0, pmpaddr_max),
            // Cannot create region smaller than 4 bytes:
            (false, 0, 0),
            (false, 0xdeadbeef, 0xdeadbeef),
            (false, pmpaddr_max, pmpaddr_max),
            // On 64-bit systems, cannot create region that exceeds 56 bit:
            (
                core::mem::size_of::<usize>() != 8,
                0,
                pmpaddr_max.saturating_add(1),
            ),
            // Cannot create region with end before start:
            (false, 1, 0),
            (false, 0xdeadbeef, 0x8badf00d),
            (false, pmpaddr_max, 0),
        ] {
            match (
                valid,
                TORRegionSpec::from_pmpaddr_csrs(pmpaddr_a, pmpaddr_b),
            ) {
                (true, Some(region)) => {
                    assert_eq!(region.pmpaddr_a(), pmpaddr_a);
                    assert_eq!(region.pmpaddr_b(), pmpaddr_b);
                }

                (true, None) => {
                    panic!(
                        "Failed to create TOR region over pmpaddr CSRs ({:x?}, {:x?}), but it has to succeed!",
                        pmpaddr_a, pmpaddr_b,
                    );
                }

                (false, Some(region)) => {
                    panic!(
                        "Creation of TOR region over pmpaddr CSRs ({:x?}, {:x?}) must fail, but succeeded: {:?}",
                        pmpaddr_a, pmpaddr_b, region
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }

    #[test]
    fn test_tor_region_spec_from_start_end_addrs() {
        use super::TORRegionSpec;

        fn panicing_shr_2(i: usize) -> usize {
            assert_eq!(i & 0b11, 0);
            i >> 2
        }

        // Unfortunately, we can't run these unit tests for different platforms
        // with arbitrary bit-widths (at least when using `usize` in the
        // `TORRegionSpec` internally).
        //
        // For now, we check whatever word-size our host-platform has and
        // generate our test vectors according to those expectations.
        let last_addr: usize = if core::mem::size_of::<usize>() == 8 {
            0x003F_FFFF_FFFF_FFFC_u64.try_into().unwrap()
        } else {
            // For 32-bit platforms, this cannot actually cover the whole
            // 32-bit address space. We must exclude the last 4 bytes.
            usize::MAX & (!0b11)
        };

        for (valid, start, end) in [
            // Can span the whole address space (up to 34 bit on RV32, and 56
            // bit on RV64):
            (true, 0, 4),
            (true, 0x13374200, 0xdead10cc),
            (true, last_addr - 4, last_addr),
            (true, 0, last_addr),
            // Cannot create region with start and end address not aligned on
            // 4-byte boundary:
            (false, 4, 5),
            (false, 4, 6),
            (false, 4, 7),
            (false, 5, 8),
            (false, 6, 8),
            (false, 7, 8),
            // Cannot create region smaller than 4 bytes:
            (false, 0, 0),
            (false, 0x13374200, 0x13374200),
            (false, 0x13374200, 0x13374201),
            (false, 0x13374200, 0x13374202),
            (false, 0x13374200, 0x13374203),
            (false, last_addr, last_addr),
            // On 64-bit systems, cannot create region that exceeds 56 bit or
            // covers the last 4 bytes of this address space. On 32-bit, cannot
            // cover the full address space (excluding the last 4 bytes of the
            // address space):
            (false, 0, last_addr.checked_add(1).unwrap()),
            // Cannot create region with end before start:
            (false, 4, 0),
            (false, 0xdeadbeef, 0x8badf00d),
            (false, last_addr, 0),
        ] {
            match (
                valid,
                TORRegionSpec::from_start_end(start as *const u8, end as *const u8),
            ) {
                (true, Some(region)) => {
                    assert_eq!(region.pmpaddr_a(), panicing_shr_2(start));
                    assert_eq!(region.pmpaddr_b(), panicing_shr_2(end));
                }

                (true, None) => {
                    panic!(
                        "Failed to create TOR region from address range [{:x?}, {:x?}), but it has to succeed!",
                        start, end,
                    );
                }

                (false, Some(region)) => {
                    panic!(
                        "Creation of TOR region from address range [{:x?}, {:x?}) must fail, but succeeded: {:?}",
                        start, end, region
                    );
                }

                (false, None) => {
                    // Good, nothing to do here.
                }
            }
        }
    }
}

/// Print a table of the configured PMP regions, read from the HW CSRs.
///
/// # Safety
///
/// This function is unsafe, as it relies on the PMP CSRs to be accessible, and
/// the hardware to feature `PHYSICAL_ENTRIES` PMP CSR entries. If these
/// conditions are not met, calling this function can result in undefined
/// behavior (e.g., cause a system trap).
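///
/// For illustration, a chip's `fmt::Display` implementation might forward to
/// this function as `unsafe { format_pmp_entries::<16>(f) }` (a sketch; the
/// entry count is hardware-specific, and the caller must uphold the safety
/// requirements above).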
pub unsafe fn format_pmp_entries<const PHYSICAL_ENTRIES: usize>(
    f: &mut fmt::Formatter<'_>,
) -> fmt::Result {
    for i in 0..PHYSICAL_ENTRIES {
        // Extract the entry's pmpcfgX register value. The pmpcfgX CSRs are
        // tightly packed and contain 4 octets belonging to individual
        // entries. Convert this into a u8-wide LocalRegisterCopy<u8,
        // pmpcfg_octet> as a generic register type, independent of the entry's
        // offset.
        let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
            csr::CSR
                .pmpconfig_get(i / 4)
                .overflowing_shr(((i % 4) * 8) as u32)
                .0 as u8,
        );

        // The address interpretation is different for every mode. Return both a
        // string indicating the PMP entry's mode, as well as the effective
        // start and end address (inclusive) affected by the region. For regions
        // that are OFF, we still want to expose the pmpaddrX register value --
        // thus return the raw unshifted value as the addr, and 0 as the
        // region's end.
        let (start_label, start, end, mode) = match pmpcfg.read_as_enum(pmpcfg_octet::a) {
            Some(pmpcfg_octet::a::Value::OFF) => {
                let addr = csr::CSR.pmpaddr_get(i);
                ("pmpaddr", addr, 0, "OFF  ")
            }

            Some(pmpcfg_octet::a::Value::TOR) => {
                let start = if i > 0 {
                    csr::CSR.pmpaddr_get(i - 1)
                } else {
                    0
                };

                (
                    "  start",
                    start.overflowing_shl(2).0,
                    csr::CSR.pmpaddr_get(i).overflowing_shl(2).0.wrapping_sub(1),
                    "TOR  ",
                )
            }

            Some(pmpcfg_octet::a::Value::NA4) => {
                let addr = csr::CSR.pmpaddr_get(i).overflowing_shl(2).0;
                ("  start", addr, addr | 0b11, "NA4  ")
            }

            Some(pmpcfg_octet::a::Value::NAPOT) => {
                let pmpaddr = csr::CSR.pmpaddr_get(i);
                let encoded_size = pmpaddr.trailing_ones();
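                // A NAPOT `pmpaddrX` value with `n` trailing one-bits encodes
                // a region of 2^(n + 3) bytes (2^(n + 1) units of the 4-byte
                // `pmpaddr` granularity). Clearing the trailing one-bits
                // recovers the shifted base address; if all bits are set, the
                // region covers the entire address space.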
                if (encoded_size as usize) < (core::mem::size_of_val(&pmpaddr) * 8 - 1) {
                    let start = pmpaddr - ((1 << encoded_size) - 1);
                    let end = start + (1 << (encoded_size + 1)) - 1;
                    (
                        "  start",
                        start.overflowing_shl(2).0,
                        end.overflowing_shl(2).0 | 0b11,
                        "NAPOT",
                    )
                } else {
                    ("  start", usize::MIN, usize::MAX, "NAPOT")
                }
            }

            None => {
                // We match on a 2-bit value with 4 variants, so this is
                // unreachable. However, don't insert a panic in case this
                // doesn't get optimized away:
                ("", 0, 0, "")
            }
        };

        // Ternary operator shortcut function, to avoid bulky formatting...
        fn t<T>(cond: bool, a: T, b: T) -> T {
            if cond {
                a
            } else {
                b
            }
        }

        write!(
            f,
            "  [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}) ({}{}{}{})\r\n",
            i,
            start_label,
            start,
            end,
            pmpcfg.get(),
            mode,
            t(pmpcfg.is_set(pmpcfg_octet::l), "l", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
        )?;
    }

    Ok(())
}

/// A RISC-V PMP implementation exposing a number of TOR memory protection
/// regions to the [`PMPUserMPU`].
///
/// The RISC-V PMP is complex and can be used to enforce memory protection in
/// various modes (Machine, Supervisor and User mode). Depending on the exact
/// extension set present (e.g., ePMP) and the machine's security configuration
/// bits, it may expose a vastly different set of constraints and application
/// semantics.
///
/// Because we can't possibly capture all of this in a single readable,
/// maintainable and efficient implementation, we implement a two-layer system:
///
/// - a [`TORUserPMP`] is a simple abstraction over some underlying PMP hardware
///   implementation, which exposes an interface to configure regions that are
///   active (enforced) in user-mode and can be configured for arbitrary
///   addresses on a 4-byte granularity.
///
/// - the [`PMPUserMPU`] takes this abstraction and implements the Tock kernel's
///   [`mpu::MPU`] trait. It worries about re-configuring memory protection when
///   switching processes, allocating memory regions of an appropriate size,
///   etc.
///
/// Implementors of a chip are free to define their own [`TORUserPMP`]
/// implementations, adhering to their specific PMP layout & constraints,
/// provided they implement this trait.
///
/// The `MAX_REGIONS` const generic is used to indicate the maximum number of
/// TOR PMP regions available to the [`PMPUserMPU`]. The PMP implementation may
/// provide fewer regions than indicated through `MAX_REGIONS`, for instance
/// when entries are enforced (locked) in machine mode. The number of available
/// regions may change at runtime. The current number of regions available to
/// the [`PMPUserMPU`] is indicated by the [`TORUserPMP::available_regions`]
/// method. However, when it is known that a number of regions are not available
/// for userspace protection, `MAX_REGIONS` can be used to reduce the memory
/// footprint allocated by stored PMP configurations, as well as the
/// re-configuration overhead.
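///
/// For illustration, instantiating the adapter over a hypothetical
/// implementation `MyChipPMP` (a sketch; `MyChipPMP` is not defined in this
/// module):
///
/// ```ignore
/// let user_mpu: PMPUserMPU<4, MyChipPMP> = PMPUserMPU::new(MyChipPMP::new());
/// ```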
pub trait TORUserPMP<const MAX_REGIONS: usize> {
    /// A placeholder to define const-assertions which are evaluated in
    /// [`PMPUserMPU::new`]. This can be used to, for instance, assert that the
    /// number of userspace regions does not exceed the number of hardware
    /// regions.
    const CONST_ASSERT_CHECK: ();

    /// The number of TOR regions currently available for userspace memory
    /// protection. Within `[0; MAX_REGIONS]`.
    ///
    /// The PMP implementation may provide fewer regions than indicated through
    /// `MAX_REGIONS`, for instance when entries are enforced (locked) in
    /// machine mode. The number of available regions may change at runtime. The
    /// implementation is free to map these regions to arbitrary PMP entries
    /// (and change this mapping at runtime), provided that they are enforced
    /// when the hart is in user-mode, and other memory regions are generally
    /// inaccessible when in user-mode.
    ///
    /// When allocating regions for kernel-mode protection, and thus reducing
    /// the number of regions available to userspace, re-configuring the PMP may
    /// fail. This is allowed behavior. However, the PMP must not remove any
    /// regions from the current user-mode configuration while it is active
    /// ([`TORUserPMP::enable_user_pmp`] has been called, and it has not been
    /// disabled through [`TORUserPMP::disable_user_pmp`]).
    fn available_regions(&self) -> usize;

    /// Configure the user-mode memory protection.
    ///
    /// This method configures the user-mode memory protection, to be enforced
    /// on a call to [`TORUserPMP::enable_user_pmp`].
    ///
    /// PMP implementations where configured regions are only enforced in
    /// user-mode may re-configure the PMP on this function invocation and
    /// implement [`TORUserPMP::enable_user_pmp`] as a no-op. If configured
    /// regions are enforced in machine-mode (for instance when using an ePMP
    /// with the machine-mode whitelist policy), the new configuration rules
    /// must not apply until [`TORUserPMP::enable_user_pmp`] is called.
    ///
    /// The tuples passed in the `regions` parameter are defined as follows:
    ///
    /// - first value ([`TORUserPMPCFG`]): the memory protection mode as
    ///   enforced on the region. A `TORUserPMPCFG` can be created from the
    ///   [`mpu::Permissions`] type. It is in a format compatible to the pmpcfgX
    ///   register, guaranteed to not have the lock (`L`) bit set, and
    ///   configured either as a TOR region (`A = 0b01`), or disabled (all bits
    ///   set to `0`).
    ///
    /// - second value (`*const u8`): the region's start address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// - third value (`*const u8`): the region's end address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// To disable a region, set its configuration to [`TORUserPMPCFG::OFF`]. In
    /// this case, the start and end addresses are ignored and can be set to
    /// arbitrary values.
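    ///
    /// For example, configuring one read-only region and disabling the second
    /// slot, for a hypothetical implementation with `MAX_REGIONS = 2` (an
    /// illustrative sketch):
    ///
    /// ```ignore
    /// pmp.configure_pmp(&[
    ///     (
    ///         mpu::Permissions::ReadOnly.into(),
    ///         0x8000_0000 as *const u8,
    ///         0x8000_1000 as *const u8,
    ///     ),
    ///     (TORUserPMPCFG::OFF, core::ptr::null(), core::ptr::null()),
    /// ])
    /// .expect("PMP configuration failed");
    /// ```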
    fn configure_pmp(
        &self,
        regions: &[(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    ) -> Result<(), ()>;

    /// Enable the user-mode memory protection.
    ///
    /// Enables the memory protection for user-mode, as configured through
    /// [`TORUserPMP::configure_pmp`]. Enabling the PMP for user-mode may make
    /// the user-mode accessible regions inaccessible to the kernel. For PMP
    /// implementations where configured regions are only enforced in user-mode,
    /// this method may be implemented as a no-op.
    ///
    /// If enabling the current configuration is not possible (e.g., because
    /// regions have been allocated to the kernel), this function must return
    /// `Err(())`. Otherwise, this function returns `Ok(())`.
    fn enable_user_pmp(&self) -> Result<(), ()>;

    /// Disable the user-mode memory protection.
    ///
    /// Disables the memory protection for user-mode. If enabling the user-mode
    /// memory protection made user-mode accessible regions inaccessible to
    /// machine-mode, this method should make these regions accessible again.
    ///
    /// For PMP implementations where configured regions are only enforced in
    /// user-mode, this method may be implemented as a no-op. This method is not
    /// responsible for making regions inaccessible to user-mode. If previously
    /// configured regions must be made inaccessible,
    /// [`TORUserPMP::configure_pmp`] must be used to re-configure the PMP
    /// accordingly.
    fn disable_user_pmp(&self);
}

/// Struct storing userspace memory protection regions for the [`PMPUserMPU`].
pub struct PMPUserMPUConfig<const MAX_REGIONS: usize> {
    /// PMP config identifier, as generated by the issuing PMP implementation.
    id: NonZeroUsize,
    /// Indicates if the configuration has changed since the last time it was
    /// written to hardware.
    is_dirty: Cell<bool>,
    /// Array of MPU regions. Each region requires two physical PMP entries.
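    /// (TOR addressing encodes a region's bounds in two consecutive `pmpaddrX`
    /// CSRs, with `pmpaddr[i-1]` as the start and `pmpaddr[i]` as the end.)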
    regions: [(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    /// Which region index (into the `regions` array above) is used
    /// for app memory (if it has been configured).
    app_memory_region: OptionalCell<usize>,
}

impl<const MAX_REGIONS: usize> fmt::Display for PMPUserMPUConfig<MAX_REGIONS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Ternary operator shortcut function, to avoid bulky formatting...
        fn t<T>(cond: bool, a: T, b: T) -> T {
            if cond {
                a
            } else {
                b
            }
        }

        write!(
            f,
            " PMPUserMPUConfig {{\r\n  id: {},\r\n  is_dirty: {},\r\n  app_memory_region: {:?},\r\n  regions:\r\n",
            self.id,
            self.is_dirty.get(),
            self.app_memory_region.get()
        )?;

        for (i, (tor_user_pmpcfg, start, end)) in self.regions.iter().enumerate() {
            let pmpcfg = tor_user_pmpcfg.get_reg();
            write!(
                f,
                "     #{:02}: start={:#010X}, end={:#010X}, cfg={:#04X} ({}) (-{}{}{})\r\n",
                i,
                *start as usize,
                *end as usize,
                pmpcfg.get(),
                t(pmpcfg.is_set(pmpcfg_octet::a), "TOR", "OFF"),
                t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
                t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
                t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
            )?;
        }

        write!(f, " }}\r\n")?;
        Ok(())
    }
}

/// Adapter from a generic PMP implementation exposing TOR-type regions to the
/// Tock [`mpu::MPU`] trait. See [`TORUserPMP`].
pub struct PMPUserMPU<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> {
    /// Monotonically increasing counter for allocated configurations, used to
    /// assign unique IDs to `PMPUserMPUConfig` instances.
    config_count: Cell<NonZeroUsize>,
    /// The configuration that the PMP was last configured for. Used (along with
    /// the `is_dirty` flag) to determine if PMP can skip writing the
    /// configuration to hardware.
    last_configured_for: OptionalCell<NonZeroUsize>,
    /// Underlying hardware PMP implementation, exposing a number (up to
    /// `P::MAX_REGIONS`) of memory protection regions with a 4-byte enforcement
    /// granularity.
    pub pmp: P,
}

impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> PMPUserMPU<MAX_REGIONS, P> {
    pub fn new(pmp: P) -> Self {
        // Assigning this constant here ensures evaluation of the const
        // expression at compile time, and can thus be used to enforce
        // compile-time assertions based on the desired PMP configuration.
        #[allow(clippy::let_unit_value)]
        let _: () = P::CONST_ASSERT_CHECK;

        PMPUserMPU {
            config_count: Cell::new(NonZeroUsize::MIN),
            last_configured_for: OptionalCell::empty(),
            pmp,
        }
    }
}

impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> kernel::platform::mpu::MPU
    for PMPUserMPU<MAX_REGIONS, P>
{
    type MpuConfig = PMPUserMPUConfig<MAX_REGIONS>;

    fn enable_app_mpu(&self) {
        // TODO: This operation may fail when the PMP is not exclusively used
        // for userspace. Instead of panicking, we should handle this case more
        // gracefully and return an error in the `MPU` trait. Process
        // infrastructure can then attempt to re-schedule the process later on,
        // try to revoke some optional shared memory regions, or suspend the
        // process.
        self.pmp.enable_user_pmp().unwrap()
    }

    fn disable_app_mpu(&self) {
        self.pmp.disable_user_pmp()
    }

    fn number_total_regions(&self) -> usize {
        self.pmp.available_regions()
    }

    fn new_config(&self) -> Option<Self::MpuConfig> {
        let id = self.config_count.get();
        self.config_count.set(id.checked_add(1)?);

        Some(PMPUserMPUConfig {
            id,
            regions: [(
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            ); MAX_REGIONS],
            is_dirty: Cell::new(true),
            app_memory_region: OptionalCell::empty(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        config.regions.iter_mut().for_each(|region| {
            *region = (
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            )
        });
        config.app_memory_region.clear();
        config.is_dirty.set(true);
    }

    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<mpu::Region> {
        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints. For this, start with the
        // provided start address and size, transform them to meet the
        // constraints, and then check that we're still within the bounds of the
        // provided values:
        let mut start = unallocated_memory_start as usize;
        let mut size = min_region_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if size % 4 != 0 {
            size += 4 - (size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if size < 4 {
            size = 4;
        }

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        //     start + size <= unallocated_memory_start + unallocated_memory_size
        if start + size > (unallocated_memory_start as usize) + unallocated_memory_size {
            // We're overflowing the provided memory region, can't make
            // allocation. Normally, we'd abort here.
            //
            // However, a previous implementation of this code was incorrect in
            // that it performed this check before adjusting the requested
            // region size to meet PMP region layout constraints (4 byte
            // alignment for start and end address). Existing applications
            // whose end-address is aligned on a less than 4-byte boundary
            // would thus be given access to additional memory which should be
            // inaccessible. Unfortunately, we can't fix this without breaking
            // existing applications. Thus, we perform the same insecure hack
            // here, and give the apps at most an extra 3 bytes of memory, as
            // long as the requested region has no write privileges.
            //
            // TODO: Remove this logic as part of
            // https://github.com/tock/tock/issues/3544
            let writeable = match permissions {
                mpu::Permissions::ReadWriteExecute => true,
                mpu::Permissions::ReadWriteOnly => true,
                mpu::Permissions::ReadExecuteOnly => false,
                mpu::Permissions::ReadOnly => false,
                mpu::Permissions::ExecuteOnly => false,
            };

            if writeable
                || (start + size
                    > (unallocated_memory_start as usize) + unallocated_memory_size + 3)
            {
                return None;
            }
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF && region_overlaps(region, start as *const u8, size) {
                return None;
            }
        }

        // All checks passed, store region allocation and mark config as dirty:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + size) as *const u8,
        );
        config.is_dirty.set(true);

        Some(mpu::Region::new(start as *const u8, size))
    }

    fn remove_memory_region(
        &self,
        region: mpu::Region,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let index = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, r)| {
                // `start as usize + size` in lieu of a safe pointer offset method
                r.0 != TORUserPMPCFG::OFF
                    && core::ptr::eq(r.1, region.start_address())
                    && core::ptr::eq(
                        r.2,
                        (region.start_address() as usize + region.size()) as *const u8,
                    )
            })
            .map(|(i, _)| i)
            .ok_or(())?;

        config.regions[index].0 = TORUserPMPCFG::OFF;
        config.is_dirty.set(true);

        Ok(())
    }

    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        // An app memory region can only be allocated once per `MpuConfig`.
        // If we already have one, abort:
        if config.app_memory_region.is_some() {
            return None;
        }

        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints for the region specified by
        // `initial_app_memory_size` (which is the part of the region actually
        // protected by the PMP). For this, start with the provided start
        // address and size, transform them to meet the constraints, and then
        // check that we're still within the bounds of the provided values:
        let mut start = unallocated_memory_start as usize;
        let mut pmp_region_size = initial_app_memory_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if pmp_region_size % 4 != 0 {
            pmp_region_size += 4 - (pmp_region_size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if pmp_region_size < 4 {
            pmp_region_size = 4;
        }

        // We need to provide a memory block that fits both the initial app and
        // kernel memory sections, and is `min_memory_size` bytes
        // long. Calculate the length of this block with our new PMP-aligned
        // size:
        let memory_block_size = cmp::max(
            min_memory_size,
            pmp_region_size + initial_kernel_memory_size,
        );

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        //     start + memory_block_size
        //         <= unallocated_memory_start + unallocated_memory_size
        //
        // , which ensures the PMP constraints didn't push us over the bounds of
        // the provided memory region, and we can fit the entire allocation as
        // requested by the kernel:
        if start + memory_block_size > (unallocated_memory_start as usize) + unallocated_memory_size
        {
            // Overflowing the provided memory region, can't make allocation:
            return None;
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF
                && region_overlaps(region, start as *const u8, memory_block_size)
            {
                return None;
            }
        }

        // All checks passed, store region allocation, indicate the
        // app_memory_region, and mark config as dirty:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + pmp_region_size) as *const u8,
        );
        config.is_dirty.set(true);
        config.app_memory_region.replace(region_num);

        Some((start as *const u8, memory_block_size))
    }

    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let region_num = config.app_memory_region.get().ok_or(())?;

        let mut app_memory_break = app_memory_break as usize;
        let kernel_memory_break = kernel_memory_break as usize;

        // Ensure that the requested app_memory_break complies with PMP
        // alignment constraints, namely that the region's end address is 4 byte
        // aligned:
        if app_memory_break % 4 != 0 {
            app_memory_break += 4 - (app_memory_break % 4);
        }

        // Check if the app has run out of memory:
        if app_memory_break > kernel_memory_break {
            return Err(());
        }

        // If we're not out of memory, update the region configuration
        // accordingly:
        config.regions[region_num].0 = permissions.into();
        config.regions[region_num].2 = app_memory_break as *const u8;
        config.is_dirty.set(true);

        Ok(())
    }

    fn configure_mpu(&self, config: &Self::MpuConfig) {
        if !self.last_configured_for.contains(&config.id) || config.is_dirty.get() {
            self.pmp.configure_pmp(&config.regions).unwrap();
            config.is_dirty.set(false);
            self.last_configured_for.set(config.id);
        }
    }
}

#[cfg(test)]
pub mod tor_user_pmp_test {
    use super::{TORUserPMP, TORUserPMPCFG};

    struct MockTORUserPMP;
    impl<const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS> for MockTORUserPMP {
        // Don't require any const-assertions in the MockTORUserPMP.
        const CONST_ASSERT_CHECK: () = ();

        fn available_regions(&self) -> usize {
            // For the MockTORUserPMP, we always assume to have the full number
            // of MPU_REGIONS available. More advanced tests may want to return
            // a different number here (to simulate kernel memory protection)
            // and make the configuration fail at runtime, for instance.
            MPU_REGIONS
        }

        fn configure_pmp(
            &self,
            _regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            Ok(())
        }

        fn disable_user_pmp(&self) {}
    }
1238
1239    // TODO: implement more test cases, such as:
1240    //
1241    // - Try to update the app memory break with an invalid pointer below its
1242    //   allocation's start address.
1243
1244    #[test]
1245    fn test_mpu_region_no_overlap() {
1246        use crate::pmp::PMPUserMPU;
1247        use kernel::platform::mpu::{Permissions, MPU};
1248
1249        let mpu: PMPUserMPU<8, MockTORUserPMP> = PMPUserMPU::new(MockTORUserPMP);
1250        let mut config = mpu
1251            .new_config()
1252            .expect("Failed to allocate the first MPU config");
1253
1254        // Allocate a region which spans from 0x40000000 to 0x80000000 (this
1255        // meets PMP alignment constraints and will work on 32-bit and 64-bit
1256        // systems)
1257        let region_0 = mpu
1258            .allocate_region(
1259                0x40000000 as *const u8,
1260                0x40000000,
1261                0x40000000,
1262                Permissions::ReadWriteOnly,
1263                &mut config,
1264            )
1265            .expect(
1266                "Failed to allocate a well-aligned R/W MPU region with \
1267                 unallocated_memory_size == min_region_size",
1268            );
1269        assert!(region_0.start_address() == 0x40000000 as *const u8);
1270        assert!(region_0.size() == 0x40000000);
1271
1272        // Try to allocate a region adjacent to `region_0`. This should work:
1273        let region_1 = mpu
1274            .allocate_region(
1275                0x80000000 as *const u8,
1276                0x10000000,
1277                0x10000000,
1278                Permissions::ReadExecuteOnly,
1279                &mut config,
1280            )
1281            .expect(
1282                "Failed to allocate a well-aligned R/W MPU region adjacent to \
1283                 another region",
1284            );
1285        assert!(region_1.start_address() == 0x80000000 as *const u8);
1286        assert!(region_1.size() == 0x10000000);
1287
1288        // Remove the previously allocated `region_1`:
1289        mpu.remove_memory_region(region_1, &mut config)
1290            .expect("Failed to remove valid MPU region allocation");
1291
1292        // Allocate another region which spans from 0xc0000000 to 0xd0000000
1293        // (this meets PMP alignment constraints and will work on 32-bit and
1294        // 64-bit systems), but this time allocate it using the
1295        // `allocate_app_memory_region` method. We want a region of `0x20000000`
1296        // bytes, but only the first `0x10000000` should be accessible to the
1297        // app.
1298        let (region_2_start, region_2_size) = mpu
1299            .allocate_app_memory_region(
1300                0xc0000000 as *const u8,
1301                0x20000000,
1302                0x20000000,
1303                0x10000000,
1304                0x08000000,
1305                Permissions::ReadWriteOnly,
1306                &mut config,
1307            )
1308            .expect(
1309                "Failed to allocate a well-aligned R/W app memory MPU region \
1310                 with unallocated_memory_size == min_region_size",
1311            );
1312        assert!(region_2_start == 0xc0000000 as *const u8);
1313        assert!(region_2_size == 0x20000000);
1314
1315        // --> General overlap tests involving both regions
1316
1317        // Now, try to allocate another region that spans over both memory
1318        // regions. This should fail.
1319        assert!(mpu
1320            .allocate_region(
1321                0x40000000 as *const u8,
1322                0xc0000000,
1323                0xc0000000,
1324                Permissions::ReadOnly,
1325                &mut config,
1326            )
1327            .is_none());
1328
1329        // Try to allocate a region that spans over parts of both memory
1330        // regions. This should fail.
1331        assert!(mpu
1332            .allocate_region(
1333                0x48000000 as *const u8,
1334                0x80000000,
1335                0x80000000,
1336                Permissions::ReadOnly,
1337                &mut config,
1338            )
1339            .is_none());
1340
1341        // --> Overlap tests involving a single region (region_0)
1342        //
1343        // We define these in an array, such that we can run the tests with the
1344        // `region_0` defined (to confirm that the allocations are indeed
1345        // refused), and with `region_0` removed (to make sure they would work
1346        // in general).
1347        let overlap_region_0_tests = [
1348            (
1349                // Try to allocate a region that is contained within
1350                // `region_0`. This should fail.
1351                0x41000000 as *const u8,
1352                0x01000000,
1353                0x01000000,
1354                Permissions::ReadWriteOnly,
1355            ),
1356            (
1357                // Try to allocate a region that overlaps with `region_0` in the
1358                // front. This should fail.
1359                0x38000000 as *const u8,
1360                0x10000000,
1361                0x10000000,
1362                Permissions::ReadWriteExecute,
1363            ),
1364            (
1365                // Try to allocate a region that overlaps with `region_0` in the
1366                // back. This should fail.
1367                0x48000000 as *const u8,
1368                0x10000000,
1369                0x10000000,
1370                Permissions::ExecuteOnly,
1371            ),
1372            (
1373                // Try to allocate a region that spans over `region_0`. This
1374                // should fail.
1375                0x38000000 as *const u8,
1376                0x20000000,
1377                0x20000000,
1378                Permissions::ReadWriteOnly,
1379            ),
1380        ];
1381
1382        // Make sure that the allocation requests fail with `region_0` defined:
1383        for (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
1384            assert!(mpu
1385                .allocate_region(*memory_start, *memory_size, *length, *perms, &mut config,)
1386                .is_none());
1387        }
1388
1389        // Now, remove `region_0` and re-run the tests. Every test case should
1390        // now succeed in isolation (hence we remove each successful allocation):
1391        mpu.remove_memory_region(region_0, &mut config)
1392            .expect("Failed to remove valid MPU region allocation");
1393
1394        for region @ (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
1395            let allocation_res =
1396                mpu.allocate_region(*memory_start, *memory_size, *length, *perms, &mut config);
1397
1398            match allocation_res {
1399                Some(region) => {
1400                    mpu.remove_memory_region(region, &mut config)
1401                        .expect("Failed to remove valid MPU region allocation");
1402                }
1403                None => {
1404                    panic!(
1405                        "Failed to allocate region that does not overlap and should meet alignment constraints: {:?}",
1406                        region
1407                    );
1408                }
1409            }
1410        }
1411
1412        // Make sure we can technically allocate a memory region that overlaps
1413        // with the kernel part of the `app_memory_region`.
1414        //
1415        // It is unclear whether this should be supported.
1416        let region_2 = mpu
1417            .allocate_region(
1418                0xd0000000 as *const u8,
1419                0x10000000,
1420                0x10000000,
1421                Permissions::ReadWriteOnly,
1422                &mut config,
1423            )
1424            .unwrap();
1425        assert!(region_2.start_address() == 0xd0000000 as *const u8);
1426        assert!(region_2.size() == 0x10000000);
1427
1428        // Now, we can grow the app memory break into this region:
1429        mpu.update_app_memory_region(
1430            0xd0000004 as *const u8,
1431            0xd8000000 as *const u8,
1432            Permissions::ReadWriteOnly,
1433            &mut config,
1434        )
1435        .expect("Failed to grow the app memory region into an existing other MPU region");
1436
1437        // Now, we have two overlapping MPU regions. Remove `region_2`, and try
1438        // to reallocate it as `region_3`. This should fail now, demonstrating
1439        // that we managed to reach an invalid intermediate state:
1440        mpu.remove_memory_region(region_2, &mut config)
1441            .expect("Failed to remove valid MPU region allocation");
1442        assert!(mpu
1443            .allocate_region(
1444                0xd0000000 as *const u8,
1445                0x10000000,
1446                0x10000000,
1447                Permissions::ReadWriteOnly,
1448                &mut config,
1449            )
1450            .is_none());
1451    }
1452}
1453
1454pub mod simple {
1455    use super::{pmpcfg_octet, TORUserPMP, TORUserPMPCFG};
1456    use crate::csr;
1457    use core::fmt;
1458    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};
1459
1460    /// A "simple" RISC-V PMP implementation.
1461    ///
1462    /// The SimplePMP does not support locked regions, kernel memory protection,
1463    /// or any ePMP features (using the mseccfg CSR). It is generic over the
1464    /// number of hardware PMP entries available: `AVAILABLE_ENTRIES` is
1465    /// expected to be set to the number of available entries.
1466    ///
1467    /// [`SimplePMP`] implements [`TORUserPMP`] to expose all of its regions as
1468    /// "top of range" (TOR) regions (each taking up two physical PMP entries)
1469    /// for use as a user-mode memory protection mechanism.
1470    ///
1471    /// Notably, [`SimplePMP`] implements `TORUserPMP<MPU_REGIONS>` over a
1472    /// generic `MPU_REGIONS` where `MPU_REGIONS <= (AVAILABLE_ENTRIES / 2)`. As
1473    /// PMP re-configuration can have a significant runtime overhead, users are
1474    /// free to specify a small `MPU_REGIONS` const-generic parameter to reduce
1475    /// the runtime overhead induced through PMP configuration, at the cost of
1476    /// having fewer PMP regions available for userspace memory
1477    /// protection.
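    ///
    /// As an illustrative sketch (the entry count and region count are
    /// assumed values, not taken from any particular chip; `PMPUserMPU` is
    /// defined earlier in this file), instantiation could look like:
    ///
    /// ```rust,ignore
    /// // Probe all 16 entries; this fails if any entry is locked or not
    /// // backed by hardware:
    /// let pmp = unsafe { SimplePMP::<16>::new() }.unwrap();
    ///
    /// // Expose only 4 of the up to 8 possible TOR regions, trading MPU
    /// // regions for cheaper PMP re-configuration:
    /// let mpu: PMPUserMPU<4, SimplePMP<16>> = PMPUserMPU::new(pmp);
    /// ```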
1478    pub struct SimplePMP<const AVAILABLE_ENTRIES: usize>;
1479
1480    impl<const AVAILABLE_ENTRIES: usize> SimplePMP<AVAILABLE_ENTRIES> {
1481        pub unsafe fn new() -> Result<Self, ()> {
1482            // The SimplePMP does not support locked regions, kernel memory
1483            // protection, or any ePMP features (using the mseccfg CSR). Ensure
1484            // that we don't find any locked regions. If we don't have locked
1485            // regions and can still successfully execute code, this means that
1486            // we're not in the ePMP machine-mode lockdown mode, and can treat
1487            // our hardware as a regular PMP.
1488            //
1489            // Furthermore, we test whether we can use each entry (i.e. whether
1490            // it actually exists in HW) by flipping the RWX bits. If we can't
1491            // flip them, then `AVAILABLE_ENTRIES` is incorrect.  However, this
1492            // is not sufficient to check for locked regions, because of the
1493            // ePMP's rule-lock-bypass bit. If a rule is locked, it might be the
1494            // reason why we can execute code or read-write data in machine mode
1495            // right now. Thus, never try to touch a locked region, as we might
1496            // well revoke access to a kernel region!
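            //
            // For reference, entry `i`'s octet lives in CSR `pmpcfg(i / 4)`
            // at bit offset `(i % 4) * 8`: XOR-ing with `7 << ((i % 4) * 8)`
            // flips its R/W/X bits, and clearing `0x18 << ((i % 4) * 8)`
            // sets its address-matching mode (the `a` field, bits 3 and 4)
            // to OFF.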
1497            for i in 0..AVAILABLE_ENTRIES {
1498                // Read the entry's CSR:
1499                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);
1500
1501                // Extract the entry's pmpcfg octet:
1502                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
1503                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
1504                );
1505
1506                // As outlined above, we never touch a locked region. Thus, bail
1507                // out if it's locked:
1508                if pmpcfg.is_set(pmpcfg_octet::l) {
1509                    return Err(());
1510                }
1511
1512                // Now that it's not locked, we can be sure that regardless of
1513                // any ePMP bits, this region is either ignored or entirely
1514                // denied for machine-mode access. Hence, we can change it in
1515                // arbitrary ways without breaking our own memory access. Try to
1516                // flip the R/W/X bits:
1517                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));
1518
1519                // Check if the CSR changed:
1520                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
1521                    // Didn't change! This means that this region is not backed
1522                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
1523                    // incorrect:
1524                    return Err(());
1525                }
1526
1527                // Finally, turn the region off:
1528                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
1529            }
1530
1531            // Hardware PMP is verified to be in a compatible mode / state, and
1532            // has at least `AVAILABLE_ENTRIES` entries.
1533            Ok(SimplePMP)
1534        }
1535    }
1536
1537    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
1538        for SimplePMP<AVAILABLE_ENTRIES>
1539    {
1540        // Ensure that the MPU_REGIONS (starting at entry 0, and occupying two
1541        // entries per region) don't overflow the available entries.
1542        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= (AVAILABLE_ENTRIES / 2));
1543
1544        fn available_regions(&self) -> usize {
1545            // Assume we always have `MPU_REGIONS` usable TOR regions. We don't
1546            // support locked regions or kernel protection.
1547            MPU_REGIONS
1548        }
1549
1550        // This implementation is specific for 32-bit systems. We use
1551        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
1552        // on 64-bit systems as well. However, this implementation will not work
1553        // on RV64I systems, due to the changed pmpcfgX CSR layout.
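        //
        // For example, for an even region pair starting at region index `i`,
        // `u32::from_be_bytes([odd, OFF, even, OFF])` packs the octets so
        // that PMP entry `2 * i` (region start, OFF) lands in the least
        // significant byte, entry `2 * i + 1` (region end, TOR) in the next
        // byte, and the odd region's two entries in the upper two bytes --
        // a single `pmpcfgX` CSR write thus configures two full TOR regions.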
1554        fn configure_pmp(
1555            &self,
1556            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
1557        ) -> Result<(), ()> {
1558            // Could use `iter_array_chunks` once that's stable.
1559            let mut regions_iter = regions.iter();
1560            let mut i = 0;
1561
1562            while let Some(even_region) = regions_iter.next() {
1563                let odd_region_opt = regions_iter.next();
1564
1565                if let Some(odd_region) = odd_region_opt {
1566                    // We can configure two regions at once which, given that we
1567                    // start at index 0 (an even offset), translates to a single
1568                    // CSR write for the pmpcfgX register:
1569                    csr::CSR.pmpconfig_set(
1570                        i / 2,
1571                        u32::from_be_bytes([
1572                            odd_region.0.get(),
1573                            TORUserPMPCFG::OFF.get(),
1574                            even_region.0.get(),
1575                            TORUserPMPCFG::OFF.get(),
1576                        ]) as usize,
1577                    );
1578
1579                    // Now, set the addresses of the two regions, each only
1580                    // if it is enabled:
1581                    if even_region.0 != TORUserPMPCFG::OFF {
1582                        csr::CSR
1583                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
1584                        csr::CSR
1585                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
1586                    }
1587
1588                    if odd_region.0 != TORUserPMPCFG::OFF {
1589                        csr::CSR
1590                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
1591                        csr::CSR
1592                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
1593                    }
1594
1595                    i += 2;
1596                } else {
1598                    // Modify the first two pmpcfgX octets for this region:
1599                    csr::CSR.pmpconfig_modify(
1600                        i / 2,
1601                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1602                            0x0000FFFF,
1603                            0,
1604                            u32::from_be_bytes([
1605                                0,
1606                                0,
1607                                even_region.0.get(),
1608                                TORUserPMPCFG::OFF.get(),
1609                            ]) as usize,
1610                        ),
1611                    );
1612
1613                    // Set the addresses if the region is enabled:
1614                    if even_region.0 != TORUserPMPCFG::OFF {
1615                        csr::CSR
1616                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
1617                        csr::CSR
1618                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
1619                    }
1620
1621                    i += 1;
1622                }
1623            }
1624
1625            Ok(())
1626        }
1627
1628        fn enable_user_pmp(&self) -> Result<(), ()> {
1629            // No-op. The SimplePMP does not have any kernel-enforced regions.
1630            Ok(())
1631        }
1632
1633        fn disable_user_pmp(&self) {
1634            // No-op. The SimplePMP does not have any kernel-enforced regions.
1635        }
1636    }
1637
1638    impl<const AVAILABLE_ENTRIES: usize> fmt::Display for SimplePMP<AVAILABLE_ENTRIES> {
1639        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1640            write!(f, " PMP hardware configuration -- entries: \r\n")?;
1641            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
1642        }
1643    }
1644}
1645
1646pub mod kernel_protection {
1647    use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
1648    use crate::csr;
1649    use core::fmt;
1650    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};
1651
1652    // ---------- Kernel memory-protection PMP memory region wrapper types -----
1653    //
1654    // These types exist primarily to avoid argument confusion in the
1655    // [`KernelProtectionPMP`] constructor, which accepts the addresses of these
1656    // memory regions as arguments. They further encode whether a region must
1657    // adhere to the `NAPOT` or `TOR` addressing mode constraints:
1658
1659    /// The flash memory region address range.
1660    ///
1661    /// Configured in the PMP as a `NAPOT` region.
1662    #[derive(Copy, Clone, Debug)]
1663    pub struct FlashRegion(pub NAPOTRegionSpec);
1664
1665    /// The RAM region address range.
1666    ///
1667    /// Configured in the PMP as a `NAPOT` region.
1668    #[derive(Copy, Clone, Debug)]
1669    pub struct RAMRegion(pub NAPOTRegionSpec);
1670
1671    /// The MMIO region address range.
1672    ///
1673    /// Configured in the PMP as a `NAPOT` region.
1674    #[derive(Copy, Clone, Debug)]
1675    pub struct MMIORegion(pub NAPOTRegionSpec);
1676
1677    /// The PMP region specification for the kernel `.text` section.
1678    ///
1679    /// This is to be made accessible to machine-mode as read-execute.
1680    /// Configured in the PMP as a `TOR` region.
1681    #[derive(Copy, Clone, Debug)]
1682    pub struct KernelTextRegion(pub TORRegionSpec);
1683
1684    /// A RISC-V PMP implementation which supports machine-mode (kernel) memory
1685    /// protection, with a fixed number of "kernel regions" (such as `.text`,
1686    /// flash, RAM and MMIO).
1687    ///
1688    /// This implementation will configure the PMP in the following way:
1689    ///
1690    ///   ```text
1691    ///   |-------+-----------------------------------------+-------+---+-------|
1692    ///   | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
1693    ///   |-------+-----------------------------------------+-------+---+-------|
1694    ///   |     0 | /                                     \ | OFF   |   |       |
1695    ///   |     1 | \ Userspace TOR region #0             / | TOR   |   | ????? |
1696    ///   |       |                                         |       |   |       |
1697    ///   |     2 | /                                     \ | OFF   |   |       |
1698    ///   |     3 | \ Userspace TOR region #1             / | TOR   |   | ????? |
1699    ///   |       |                                         |       |   |       |
1700    ///   | 4 ... | /                                     \ |       |   |       |
1701    ///   | n - 8 | \ Userspace TOR region #x             / |       |   |       |
1702    ///   |       |                                         |       |   |       |
1703    ///   | n - 7 | "Deny-all" user-mode rule (all memory)  | NAPOT |   | ----- |
1704    ///   |       |                                         |       |   |       |
1705    ///   | n - 6 | --------------------------------------- | OFF   | X | ----- |
1706    ///   | n - 5 | Kernel .text section                    | TOR   | X | R/X   |
1707    ///   |       |                                         |       |   |       |
1708    ///   | n - 4 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
1709    ///   |       |                                         |       |   |       |
1710    ///   | n - 3 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
1711    ///   |       |                                         |       |   |       |
1712    ///   | n - 2 | MMIO                                    | NAPOT | X | R/W   |
1713    ///   |       |                                         |       |   |       |
1714    ///   | n - 1 | "Deny-all" machine-mode    (all memory) | NAPOT | X | ----- |
1715    ///   |-------+-----------------------------------------+-------+---+-------|
1716    ///   ```
1717    ///
1718    /// This implementation does not use any `mseccfg` protection bits (ePMP
1719    /// functionality). To protect machine-mode (kernel) memory regions, regions
1720    /// must be marked as locked. However, locked regions apply to both user-
1721    /// and machine-mode. Thus, region `n - 7` serves as a "deny-all" user-mode
1722    /// rule, which prohibits all accesses not explicitly allowed through rules
1723    /// `< n - 7`. Kernel memory is made accessible underneath this "deny-all"
1724    /// region, which does not apply to machine-mode.
1725    ///
1726    /// This PMP implementation supports the [`TORUserPMP`] interface with
1727    /// `MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2)`, to leave sufficient
1728    /// space for the "deny-all" and kernel regions. This constraint is enforced
1729    /// through the [`KernelProtectionPMP::CONST_ASSERT_CHECK`] associated
1730    /// constant, which MUST be evaluated by the consumer of the [`TORUserPMP`]
1731    /// trait (usually the [`PMPUserMPU`](super::PMPUserMPU) implementation).
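    ///
    /// As an illustrative sketch (the `flash_napot_spec`, `ram_napot_spec`,
    /// `mmio_napot_spec` and `kernel_text_tor_spec` values are placeholder
    /// region specs, not prescribed by this module), a board might construct
    /// this type as follows:
    ///
    /// ```rust,ignore
    /// let pmp = unsafe {
    ///     KernelProtectionPMP::<16>::new(
    ///         FlashRegion(flash_napot_spec),
    ///         RAMRegion(ram_napot_spec),
    ///         MMIORegion(mmio_napot_spec),
    ///         KernelTextRegion(kernel_text_tor_spec),
    ///     )
    /// }
    /// .unwrap();
    ///
    /// // Consumers of `TORUserPMP` must evaluate the const assertion
    /// // (here with MPU_REGIONS = 4 <= (16 - 7) / 2):
    /// let _: () = <KernelProtectionPMP<16> as TORUserPMP<4>>::CONST_ASSERT_CHECK;
    /// ```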
1732    pub struct KernelProtectionPMP<const AVAILABLE_ENTRIES: usize>;
1733
1734    impl<const AVAILABLE_ENTRIES: usize> KernelProtectionPMP<AVAILABLE_ENTRIES> {
1735        pub unsafe fn new(
1736            flash: FlashRegion,
1737            ram: RAMRegion,
1738            mmio: MMIORegion,
1739            kernel_text: KernelTextRegion,
1740        ) -> Result<Self, ()> {
1741            for i in 0..AVAILABLE_ENTRIES {
1742                // Read the entry's CSR:
1743                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);
1744
1745                // Extract the entry's pmpcfg octet:
1746                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
1747                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
1748                );
1749
1750                // As outlined above, we never touch a locked region. Thus, bail
1751                // out if it's locked:
1752                if pmpcfg.is_set(pmpcfg_octet::l) {
1753                    return Err(());
1754                }
1755
1756                // Now that it's not locked, we can be sure that regardless of
1757                // any ePMP bits, this region is either ignored or entirely
1758                // denied for machine-mode access. Hence, we can change it in
1759                // arbitrary ways without breaking our own memory access. Try to
1760                // flip the R/W/X bits:
1761                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));
1762
1763                // Check if the CSR changed:
1764                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
1765                    // Didn't change! This means that this region is not backed
1766                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
1767                    // incorrect:
1768                    return Err(());
1769                }
1770
1771                // Finally, turn the region off:
1772                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
1773            }
1774
1775            // -----------------------------------------------------------------
1776            // Hardware PMP is verified to be in a compatible mode & state, and
1777            // has at least `AVAILABLE_ENTRIES` entries.
1778            // -----------------------------------------------------------------
1779
1780            // Now we need to set up the various kernel memory protection
1781            // regions, and the deny-all userspace region (n - 7), which is
1782            // never modified afterwards.
1783
1784            // Helper to modify an arbitrary PMP entry. Because we don't know
1785            // AVAILABLE_ENTRIES in advance, there's no good way to
1786            // optimize this further.
1787            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
1788                csr::CSR.pmpaddr_set(i, pmpaddr);
1789                csr::CSR.pmpconfig_modify(
1790                    i / 4,
1791                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1792                        0x000000FF_usize,
1793                        (i % 4) * 8,
1794                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
1795                    ),
1796                );
1797            }
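            // (For instance, `i == 5` addresses octet 1 of `pmpcfg1`,
            // i.e. bits 15:8 of that CSR.)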
1798
1799            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
1800            // particular order, with the exception of `.text` and flash:
1801            // `.text` must precede flash, as otherwise we'd be revoking execute
1802            // permissions temporarily. Given that we can currently execute
1803            // code, this should not have any impact on our accessible memory,
1804            // assuming that the provided regions are not otherwise aliased.
1805
1806            // MMIO at n - 2:
1807            write_pmpaddr_pmpcfg(
1808                AVAILABLE_ENTRIES - 2,
1809                (pmpcfg_octet::a::NAPOT
1810                    + pmpcfg_octet::r::SET
1811                    + pmpcfg_octet::w::SET
1812                    + pmpcfg_octet::x::CLEAR
1813                    + pmpcfg_octet::l::SET)
1814                    .into(),
1815                mmio.0.pmpaddr(),
1816            );
1817
1818            // RAM at n - 3:
1819            write_pmpaddr_pmpcfg(
1820                AVAILABLE_ENTRIES - 3,
1821                (pmpcfg_octet::a::NAPOT
1822                    + pmpcfg_octet::r::SET
1823                    + pmpcfg_octet::w::SET
1824                    + pmpcfg_octet::x::CLEAR
1825                    + pmpcfg_octet::l::SET)
1826                    .into(),
1827                ram.0.pmpaddr(),
1828            );
1829
1830            // `.text` at n - 6 and n - 5 (TOR region):
1831            write_pmpaddr_pmpcfg(
1832                AVAILABLE_ENTRIES - 6,
1833                (pmpcfg_octet::a::OFF
1834                    + pmpcfg_octet::r::CLEAR
1835                    + pmpcfg_octet::w::CLEAR
1836                    + pmpcfg_octet::x::CLEAR
1837                    + pmpcfg_octet::l::SET)
1838                    .into(),
1839                kernel_text.0.pmpaddr_a(),
1840            );
1841            write_pmpaddr_pmpcfg(
1842                AVAILABLE_ENTRIES - 5,
1843                (pmpcfg_octet::a::TOR
1844                    + pmpcfg_octet::r::SET
1845                    + pmpcfg_octet::w::CLEAR
1846                    + pmpcfg_octet::x::SET
1847                    + pmpcfg_octet::l::SET)
1848                    .into(),
1849                kernel_text.0.pmpaddr_b(),
1850            );
1851
1852            // flash at n - 4:
1853            write_pmpaddr_pmpcfg(
1854                AVAILABLE_ENTRIES - 4,
1855                (pmpcfg_octet::a::NAPOT
1856                    + pmpcfg_octet::r::SET
1857                    + pmpcfg_octet::w::CLEAR
1858                    + pmpcfg_octet::x::CLEAR
1859                    + pmpcfg_octet::l::SET)
1860                    .into(),
1861                flash.0.pmpaddr(),
1862            );
1863
1864            // Now that the kernel has explicit region definitions for any
1865            // memory that it needs to have access to, we can deny other memory
1866            // accesses in our very last rule (n - 1):
1867            write_pmpaddr_pmpcfg(
1868                AVAILABLE_ENTRIES - 1,
1869                (pmpcfg_octet::a::NAPOT
1870                    + pmpcfg_octet::r::CLEAR
1871                    + pmpcfg_octet::w::CLEAR
1872                    + pmpcfg_octet::x::CLEAR
1873                    + pmpcfg_octet::l::SET)
1874                    .into(),
1875                // the entire address space:
1876                0x7FFFFFFF,
1877            );
1878
1879            // Finally, we configure the non-locked user-mode deny all
1880            // rule. This must never be removed, or otherwise usermode will be
1881            // able to access all locked regions (which are supposed to be
1882            // exclusively accessible to kernel-mode):
1883            write_pmpaddr_pmpcfg(
1884                AVAILABLE_ENTRIES - 7,
1885                (pmpcfg_octet::a::NAPOT
1886                    + pmpcfg_octet::r::CLEAR
1887                    + pmpcfg_octet::w::CLEAR
1888                    + pmpcfg_octet::x::CLEAR
1889                    + pmpcfg_octet::l::CLEAR)
1890                    .into(),
1891                // the entire address space:
1892                0x7FFFFFFF,
1893            );
1894
1895            // Setup complete
1896            Ok(KernelProtectionPMP)
1897        }
1898    }
1899
1900    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
1901        for KernelProtectionPMP<AVAILABLE_ENTRIES>
1902    {
1903        /// Ensure that the MPU_REGIONS (starting at entry 0, and occupying two
1904        /// entries per region) don't overflow the available entries, excluding
1905        /// the 7 entries used for implementing the kernel memory protection.
1906        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2));
1907
1908        fn available_regions(&self) -> usize {
1909            // Assume we always have `MPU_REGIONS` usable TOR regions. We don't
1910            // support locking additional regions at runtime.
1911            MPU_REGIONS
1912        }
1913
1914        // This implementation is specific for 32-bit systems. We use
1915        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
1916        // on 64-bit systems as well. However, this implementation will not work
1917        // on RV64I systems, due to the changed pmpcfgX CSR layout.
1918        fn configure_pmp(
1919            &self,
1920            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
1921        ) -> Result<(), ()> {
1922            // Could use `iter_array_chunks` once that's stable.
1923            let mut regions_iter = regions.iter();
1924            let mut i = 0;
1925
1926            while let Some(even_region) = regions_iter.next() {
1927                let odd_region_opt = regions_iter.next();
1928
1929                if let Some(odd_region) = odd_region_opt {
1930                    // We can configure two regions at once which, given that we
1931                    // start at index 0 (an even offset), translates to a single
1932                    // CSR write for the pmpcfgX register:
1933                    csr::CSR.pmpconfig_set(
1934                        i / 2,
1935                        u32::from_be_bytes([
1936                            odd_region.0.get(),
1937                            TORUserPMPCFG::OFF.get(),
1938                            even_region.0.get(),
1939                            TORUserPMPCFG::OFF.get(),
1940                        ]) as usize,
1941                    );
1942
1943                    // Now, set the addresses of the two regions, each only
1944                    // if it is enabled:
1945                    if even_region.0 != TORUserPMPCFG::OFF {
1946                        csr::CSR
1947                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
1948                        csr::CSR
1949                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
1950                    }
1951
1952                    if odd_region.0 != TORUserPMPCFG::OFF {
1953                        csr::CSR
1954                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
1955                        csr::CSR
1956                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
1957                    }
1958
1959                    i += 2;
1960                } else {
1961                    // Modify the first two pmpcfgX octets for this region:
1962                    csr::CSR.pmpconfig_modify(
1963                        i / 2,
1964                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1965                            0x0000FFFF,
1966                            0,
1967                            u32::from_be_bytes([
1968                                0,
1969                                0,
1970                                even_region.0.get(),
1971                                TORUserPMPCFG::OFF.get(),
1972                            ]) as usize,
1973                        ),
1974                    );
1975
1976                    // Set the addresses if the region is enabled:
1977                    if even_region.0 != TORUserPMPCFG::OFF {
1978                        csr::CSR
1979                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
1980                        csr::CSR
1981                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
1982                    }
1983
1984                    i += 1;
1985                }
1986            }
1987
1988            Ok(())
1989        }
1990
1991        fn enable_user_pmp(&self) -> Result<(), ()> {
1992            // No-op. User-mode regions are never enforced in machine-mode,
1993            // and can thus be configured directly and stay enabled in
1994            // machine-mode.
1995            Ok(())
1996        }
1997
1998        fn disable_user_pmp(&self) {
1999            // No-op. User-mode regions are never enforced in machine-mode,
2000            // and can thus be configured directly and stay enabled in
2001            // machine-mode.
2002        }
2003    }
2004
2005    impl<const AVAILABLE_ENTRIES: usize> fmt::Display for KernelProtectionPMP<AVAILABLE_ENTRIES> {
2006        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2007            write!(f, " PMP hardware configuration -- entries: \r\n")?;
2008            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
2009        }
2010    }
2011}
2012
2013pub mod kernel_protection_mml_epmp {
2014    use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
2015    use crate::csr;
2016    use core::cell::Cell;
2017    use core::fmt;
2018    use kernel::platform::mpu;
2019    use kernel::utilities::registers::interfaces::{Readable, Writeable};
2020    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};
2021
2022    // ---------- Kernel memory-protection PMP memory region wrapper types -----
2023    //
2024    // These types exist primarily to avoid argument confusion in the
2025    // [`KernelProtectionMMLEPMP`] constructor, which accepts the addresses of
2026    // these memory regions as arguments. They further encode whether a region
2027    // must adhere to the `NAPOT` or `TOR` addressing mode constraints:
2028
2029    /// The flash memory region address range.
2030    ///
2031    /// Configured in the PMP as a `NAPOT` region.
2032    #[derive(Copy, Clone, Debug)]
2033    pub struct FlashRegion(pub NAPOTRegionSpec);
2034
2035    /// The RAM region address range.
2036    ///
2037    /// Configured in the PMP as a `NAPOT` region.
2038    #[derive(Copy, Clone, Debug)]
2039    pub struct RAMRegion(pub NAPOTRegionSpec);
2040
2041    /// The MMIO region address range.
2042    ///
2043    /// Configured in the PMP as a `NAPOT` region.
2044    #[derive(Copy, Clone, Debug)]
2045    pub struct MMIORegion(pub NAPOTRegionSpec);
2046
2047    /// The PMP region specification for the kernel `.text` section.
2048    ///
2049    /// This is to be made accessible to machine-mode as read-execute.
2050    /// Configured in the PMP as a `TOR` region.
2051    #[derive(Copy, Clone, Debug)]
2052    pub struct KernelTextRegion(pub TORRegionSpec);
2053
2054    /// A RISC-V ePMP implementation.
2055    ///
2056    /// Supports machine-mode (kernel) memory protection by using the
2057    /// machine-mode lockdown mode (MML), with a fixed number of
2058    /// "kernel regions" (such as `.text`, flash, RAM and MMIO).
2059    ///
2060    /// This implementation will configure the ePMP in the following way:
2061    ///
2062    /// - `mseccfg` CSR:
2063    ///   ```text
2064    ///   |-------------+-----------------------------------------------+-------|
2065    ///   | MSECCFG BIT | LABEL                                         | STATE |
2066    ///   |-------------+-----------------------------------------------+-------|
2067    ///   |           0 | Machine-Mode Lockdown (MML)                   |     1 |
2068    ///   |           1 | Machine-Mode Whitelist Policy (MMWP)          |     1 |
2069    ///   |           2 | Rule-Lock Bypass (RLB)                        |     0 |
2070    ///   |-------------+-----------------------------------------------+-------|
2071    ///   ```
2072    ///
2073    /// - `pmpaddrX` / `pmpcfgX` CSRs:
2074    ///   ```text
2075    ///   |-------+-----------------------------------------+-------+---+-------|
2076    ///   | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
2077    ///   |-------+-----------------------------------------+-------+---+-------|
2078    ///   |     0 | --------------------------------------- | OFF   | X | ----- |
2079    ///   |     1 | Kernel .text section                    | TOR   | X | R/X   |
2080    ///   |       |                                         |       |   |       |
2081    ///   |     2 | /                                     \ | OFF   |   |       |
2082    ///   |     3 | \ Userspace TOR region #0             / | TOR   |   | ????? |
2083    ///   |       |                                         |       |   |       |
2084    ///   |     4 | /                                     \ | OFF   |   |       |
2085    ///   |     5 | \ Userspace TOR region #1             / | TOR   |   | ????? |
2086    ///   |       |                                         |       |   |       |
2087    ///   | 6 ... | /                                     \ |       |   |       |
2088    ///   | n - 4 | \ Userspace TOR region #x             / |       |   |       |
2089    ///   |       |                                         |       |   |       |
2090    ///   | n - 3 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
2091    ///   |       |                                         |       |   |       |
2092    ///   | n - 2 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
2093    ///   |       |                                         |       |   |       |
2094    ///   | n - 1 | MMIO                                    | NAPOT | X | R/W   |
2095    ///   |-------+-----------------------------------------+-------+---+-------|
2096    ///   ```
2097    ///
2098    /// Crucially, this implementation relies on an unconfigured hardware PMP
2099    /// implementing the ePMP (`mseccfg` CSR) extension, providing the Machine
2100    /// Lockdown Mode (MML) security bit. This bit is required to ensure that
2101    /// any machine-mode (kernel) protection regions (lock bit set) are only
2102    /// accessible to kernel mode.
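    ///
    /// As a hedged usage sketch (the region specs are placeholders, as in
    /// the [`kernel_protection`](super::kernel_protection) module): because
    /// `new()` attempts to set the sticky MML/MMWP bits, a failure here must
    /// be treated as fatal for userspace, per the documentation of `new()`:
    ///
    /// ```rust,ignore
    /// let epmp = unsafe {
    ///     KernelProtectionMMLEPMP::<16, 4>::new(
    ///         FlashRegion(flash_napot_spec),
    ///         RAMRegion(ram_napot_spec),
    ///         MMIORegion(mmio_napot_spec),
    ///         KernelTextRegion(kernel_text_tor_spec),
    ///     )
    /// }
    /// // On error, the platform must never execute userspace code:
    /// .expect("ePMP unsupported or in an invalid state");
    /// ```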
2103    pub struct KernelProtectionMMLEPMP<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> {
2104        user_pmp_enabled: Cell<bool>,
2105        shadow_user_pmpcfgs: [Cell<TORUserPMPCFG>; MPU_REGIONS],
2106    }
2107
2108    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize>
2109        KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
2110    {
2111        // Start user-mode TOR regions after the first kernel .text region:
2112        const TOR_REGIONS_OFFSET: usize = 1;
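        //
        // A user-mode region index `r` thus occupies PMP entries
        // `(r + TOR_REGIONS_OFFSET) * 2` and `(r + TOR_REGIONS_OFFSET) * 2 + 1`;
        // e.g., user region #0 maps to entries 2 and 3 (cf. the table above).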
2113
2114        pub unsafe fn new(
2115            flash: FlashRegion,
2116            ram: RAMRegion,
2117            mmio: MMIORegion,
2118            kernel_text: KernelTextRegion,
2119        ) -> Result<Self, ()> {
2120            for i in 0..AVAILABLE_ENTRIES {
2121                // Read the entry's CSR:
2122                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);
2123
2124                // Extract the entry's pmpcfg octet:
2125                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
2126                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
2127                );
2128
2129                // As outlined above, we never touch a locked region. Thus, bail
2130                // out if it's locked:
2131                if pmpcfg.is_set(pmpcfg_octet::l) {
2132                    return Err(());
2133                }
2134
2135                // Now that it's not locked, we can be sure that regardless of
2136                // any ePMP bits, this region is either ignored or entirely
2137                // denied for machine-mode access. Hence, we can change it in
2138                // arbitrary ways without breaking our own memory access. Try to
2139                // flip the R/W/X bits:
2140                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));
2141
2142                // Check if the CSR changed:
2143                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
2144                    // Didn't change! This means that this region is not backed
2145                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
2146                    // incorrect:
2147                    return Err(());
2148                }
2149
2150                // Finally, turn the region off:
2151                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
2152            }
2153
2154            // -----------------------------------------------------------------
2155            // Hardware PMP is verified to be in a compatible mode & state, and
2156            // has at least `AVAILABLE_ENTRIES` entries. We have not yet checked
2157            // whether the PMP is actually an _e_PMP. However, we don't want to
2158            // produce a gadget to set RLB, and so the only safe way to test
2159            // this is to set up the PMP regions and then try to enable the
2160            // mseccfg bits.
2161            // -----------------------------------------------------------------
2162
2163            // Helper to modify an arbitrary PMP entry. Because we don't know
2164            // AVAILABLE_ENTRIES in advance, there's no good way to
2165            // optimize this further.
2166            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
2167                // Important to set the address first. Locking the pmpcfg
2168                // register will also lock the address register!
2169                csr::CSR.pmpaddr_set(i, pmpaddr);
2170                csr::CSR.pmpconfig_modify(
2171                    i / 4,
2172                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
2173                        0x000000FF_usize,
2174                        (i % 4) * 8,
2175                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
2176                    ),
2177                );
2178            }
2179
2180            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
2181            // particular order, with the exception of `.text` and flash:
2182            // `.text` must precede flash, as otherwise we'd be revoking execute
2183            // permissions temporarily. Given that we can currently execute
2184            // code, this should not have any impact on our accessible memory,
2185            // assuming that the provided regions are not otherwise aliased.
2186
2187            // `.text` at entries 0 and 1 (TOR region):
2188            write_pmpaddr_pmpcfg(
2189                0,
2190                (pmpcfg_octet::a::OFF
2191                    + pmpcfg_octet::r::CLEAR
2192                    + pmpcfg_octet::w::CLEAR
2193                    + pmpcfg_octet::x::CLEAR
2194                    + pmpcfg_octet::l::SET)
2195                    .into(),
2196                kernel_text.0.pmpaddr_a(),
2197            );
2198            write_pmpaddr_pmpcfg(
2199                1,
2200                (pmpcfg_octet::a::TOR
2201                    + pmpcfg_octet::r::SET
2202                    + pmpcfg_octet::w::CLEAR
2203                    + pmpcfg_octet::x::SET
2204                    + pmpcfg_octet::l::SET)
2205                    .into(),
2206                kernel_text.0.pmpaddr_b(),
2207            );
2208
2209            // MMIO at n - 1:
2210            write_pmpaddr_pmpcfg(
2211                AVAILABLE_ENTRIES - 1,
2212                (pmpcfg_octet::a::NAPOT
2213                    + pmpcfg_octet::r::SET
2214                    + pmpcfg_octet::w::SET
2215                    + pmpcfg_octet::x::CLEAR
2216                    + pmpcfg_octet::l::SET)
2217                    .into(),
2218                mmio.0.pmpaddr(),
2219            );
2220
2221            // RAM at n - 2:
2222            write_pmpaddr_pmpcfg(
2223                AVAILABLE_ENTRIES - 2,
2224                (pmpcfg_octet::a::NAPOT
2225                    + pmpcfg_octet::r::SET
2226                    + pmpcfg_octet::w::SET
2227                    + pmpcfg_octet::x::CLEAR
2228                    + pmpcfg_octet::l::SET)
2229                    .into(),
2230                ram.0.pmpaddr(),
2231            );
2232
2233            // flash at n - 3:
2234            write_pmpaddr_pmpcfg(
2235                AVAILABLE_ENTRIES - 3,
2236                (pmpcfg_octet::a::NAPOT
2237                    + pmpcfg_octet::r::SET
2238                    + pmpcfg_octet::w::CLEAR
2239                    + pmpcfg_octet::x::CLEAR
2240                    + pmpcfg_octet::l::SET)
2241                    .into(),
2242                flash.0.pmpaddr(),
2243            );
2244
2245            // Finally, attempt to enable the MSECCFG security bits, and verify
2246            // that they have been set correctly. If they have not been set to
2247            // the written value, this means that this hardware either does not
2248            // support ePMP, or it was in some invalid state otherwise. We don't
2249            // need to read back the above regions, as we previously verified that
2250            // none of their entries were locked -- so writing to them must work
2251            // even without RLB set.
2252            //
2253            // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
2254            csr::CSR.mseccfg.set(0x00000003);
2255
2256            // Read back the MSECCFG CSR to ensure that the machine's security
2257            // configuration was set properly. If this fails, we have set up the
2258            // PMP in a way that would give userspace access to kernel
2259            // space. The caller of this method must appropriately handle this
2260            // error condition by ensuring that the platform will never execute
2261            // userspace code!
2262            if csr::CSR.mseccfg.get() != 0x00000003 {
2263                return Err(());
2264            }
2265
2266            // Setup complete
2267            const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
2268            Ok(KernelProtectionMMLEPMP {
2269                user_pmp_enabled: Cell::new(false),
2270                shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; MPU_REGIONS],
2271            })
2272        }
2273    }
2274
2275    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
2276        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
2277    {
2278        // Ensure that the MPU_REGIONS (starting at entry 2, and occupying
2279        // two entries per region) don't overflow the available entries,
2280        // excluding the 5 entries used for the kernel memory protection:
2281        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 5) / 2));
2282
2283        fn available_regions(&self) -> usize {
2284            // Assume we always have `MPU_REGIONS` usable TOR regions. We don't
2285            // support locking additional regions at runtime.
2286            MPU_REGIONS
2287        }
2288
2289        // This implementation is specific for 32-bit systems. We use
2290        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
2291        // on 64-bit systems as well. However, this implementation will not work
2292        // on RV64I systems, due to the changed pmpcfgX CSR layout.
2293        fn configure_pmp(
2294            &self,
2295            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
2296        ) -> Result<(), ()> {
2297            // Configure all of the regions' addresses and store their pmpcfg octets
2298            // in our shadow storage. If the user PMP is already enabled, we further
2299            // apply this configuration (set the pmpcfgX CSRs) by running
2300            // `enable_user_pmp`:
2301            for (i, (region, shadow_user_pmpcfg)) in regions
2302                .iter()
2303                .zip(self.shadow_user_pmpcfgs.iter())
2304                .enumerate()
2305            {
2306                // The ePMP in MML mode does not support read-write-execute
2307                // regions. If such a region is to be configured, abort. As this
2308                // loop here only modifies the shadow state, we can simply abort and
2309                // return an error. We don't make any promises about the ePMP state
2310                // if the configuration fails, but it is still being activated with
2311                // `enable_user_pmp`:
2312                if region.0.get()
2313                    == <TORUserPMPCFG as From<mpu::Permissions>>::from(
2314                        mpu::Permissions::ReadWriteExecute,
2315                    )
2316                    .get()
2317                {
2318                    return Err(());
2319                }
2320
2321                // Set the CSR addresses for this region (if it's not OFF, in which
2322                // case the hardware-configured addresses are irrelevant):
2323                if region.0 != TORUserPMPCFG::OFF {
2324                    csr::CSR.pmpaddr_set(
2325                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 0,
2326                        (region.1 as usize).overflowing_shr(2).0,
2327                    );
2328                    csr::CSR.pmpaddr_set(
2329                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
2330                        (region.2 as usize).overflowing_shr(2).0,
2331                    );
2332                }
2333
2334                // Store the region's pmpcfg octet:
2335                shadow_user_pmpcfg.set(region.0);
2336            }
2337
2338            // If the PMP is currently active, apply the changes to the CSRs:
2339            if self.user_pmp_enabled.get() {
2340                self.enable_user_pmp()?;
2341            }
2342
2343            Ok(())
2344        }
2345
2346        fn enable_user_pmp(&self) -> Result<(), ()> {
2347            // We store the "enabled" PMPCFG octets of user regions in the
2348            // `shadow_user_pmpcfg` field, such that we can re-enable the PMP
2349            // without a call to `configure_pmp` (where the `TORUserPMPCFG`s are
2350            // provided by the caller).
2351
2352            // Could use `iter_array_chunks` once that's stable.
2353            let mut shadow_user_pmpcfgs_iter = self.shadow_user_pmpcfgs.iter();
2354            let mut i = Self::TOR_REGIONS_OFFSET;
2355
2356            while let Some(first_region_pmpcfg) = shadow_user_pmpcfgs_iter.next() {
2357                // If we're at a "region" offset divisible by two (where "region" =
2358                // 2 PMP "entries"), then we can configure an entire `pmpcfgX` CSR
2359                // in one operation. As CSR writes are expensive, this is an
2360                // operation worth making:
2361                let second_region_opt = if i % 2 == 0 {
2362                    shadow_user_pmpcfgs_iter.next()
2363                } else {
2364                    None
2365                };
2366
2367                if let Some(second_region_pmpcfg) = second_region_opt {
2368                    // We're at an even index and have two regions to configure, so
2369                    // do that with a single CSR write:
2370                    csr::CSR.pmpconfig_set(
2371                        i / 2,
2372                        u32::from_be_bytes([
2373                            second_region_pmpcfg.get().get(),
2374                            TORUserPMPCFG::OFF.get(),
2375                            first_region_pmpcfg.get().get(),
2376                            TORUserPMPCFG::OFF.get(),
2377                        ]) as usize,
2378                    );
2379
2380                    i += 2;
2381                } else if i % 2 == 0 {
2382                    // This is a single region at an even index. Thus, modify the
2383                    // first two pmpcfgX octets for this region.
2384                    csr::CSR.pmpconfig_modify(
2385                        i / 2,
2386                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
2387                            0x0000FFFF,
2388                            0, // lower two octets
2389                            u32::from_be_bytes([
2390                                0,
2391                                0,
2392                                first_region_pmpcfg.get().get(),
2393                                TORUserPMPCFG::OFF.get(),
2394                            ]) as usize,
2395                        ),
2396                    );
2397
2398                    i += 1;
2399                } else {
2400                    // This is a single region at an odd index. Thus, modify the
2401                    // latter two pmpcfgX octets for this region.
2402                    csr::CSR.pmpconfig_modify(
2403                        i / 2,
2404                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
2405                            0x0000FFFF,
2406                            16, // higher two octets
2407                            u32::from_be_bytes([
2408                                0,
2409                                0,
2410                                first_region_pmpcfg.get().get(),
2411                                TORUserPMPCFG::OFF.get(),
2412                            ]) as usize,
2413                        ),
2414                    );
2415
2416                    i += 1;
2417                }
2418            }
2419
2420            self.user_pmp_enabled.set(true);
2421
2422            Ok(())
2423        }
2424
2425        fn disable_user_pmp(&self) {
2426            // Simply set all of the user-region pmpcfg octets to OFF:
2427
2428            let mut user_region_pmpcfg_octet_pairs =
2429                (Self::TOR_REGIONS_OFFSET)..(Self::TOR_REGIONS_OFFSET + MPU_REGIONS);
2430            while let Some(first_region_idx) = user_region_pmpcfg_octet_pairs.next() {
2431                let second_region_opt = if first_region_idx % 2 == 0 {
2432                    user_region_pmpcfg_octet_pairs.next()
2433                } else {
2434                    None
2435                };
2436
2437                if let Some(_second_region_idx) = second_region_opt {
2438                    // We're at an even index and have two regions to configure, so
2439                    // do that with a single CSR write:
2440                    csr::CSR.pmpconfig_set(
2441                        first_region_idx / 2,
2442                        u32::from_be_bytes([
2443                            TORUserPMPCFG::OFF.get(),
2444                            TORUserPMPCFG::OFF.get(),
2445                            TORUserPMPCFG::OFF.get(),
2446                            TORUserPMPCFG::OFF.get(),
2447                        ]) as usize,
2448                    );
2449                } else if first_region_idx % 2 == 0 {
2450                    // This is a single region at an even index. Thus, modify the
2451                    // first two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0, // lower two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                } else {
                    // This is a single region at an odd index. Thus, modify the
                    // latter two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            16, // higher two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                }
            }

            self.user_pmp_enabled.set(false);
        }
    }

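    // A minimal usage sketch (not part of this driver), assuming a chip with
    // 16 PMP entries exposing 4 user-mode regions; the construction of `epmp`
    // via `KernelProtectionMMLEPMP::new` is elided. A chip's context-switch
    // path could toggle the user-mode PMP around userspace execution like so:
    //
    //     let epmp: KernelProtectionMMLEPMP<16, 4> = /* created at reset */;
    //     epmp.enable_user_pmp().expect("user PMP configuration rejected");
    //     // ... run userspace with the shadowed regions applied ...
    //     epmp.disable_user_pmp();
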
    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> fmt::Display
        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                " ePMP configuration:\r\n  mseccfg: {:#08X}, user-mode PMP active: {:?}, entries:\r\n",
                csr::CSR.mseccfg.get(),
                self.user_pmp_enabled.get()
            )?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }?;

            write!(f, "  Shadow PMP entries for user-mode:\r\n")?;
            for (i, shadowed_pmpcfg) in self.shadow_user_pmpcfgs.iter().enumerate() {
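                // Each shadowed user region corresponds to a pair of PMP
                // entries: the even entry `(i + TOR_REGIONS_OFFSET) * 2`
                // holds the region's start address and the odd entry above it
                // the TOR end address. `pmpaddrX` CSRs store addresses
                // right-shifted by two bits, hence the left shift for display.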
                let (start_pmpaddr_label, startaddr_pmpaddr, endaddr, mode) =
                    if shadowed_pmpcfg.get() == TORUserPMPCFG::OFF {
                        (
                            "pmpaddr",
                            csr::CSR.pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2),
                            0,
                            "OFF",
                        )
                    } else {
                        (
                            "  start",
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2)
                                .overflowing_shl(2)
                                .0,
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2 + 1)
                                .overflowing_shl(2)
                                .0
                                | 0b11,
                            "TOR",
                        )
                    };

                write!(
                    f,
                    "  [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}  ) ({}{}{}{})\r\n",
                    (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
                    start_pmpaddr_label,
                    startaddr_pmpaddr,
                    endaddr,
                    shadowed_pmpcfg.get().get(),
                    mode,
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::l) {
                        "l"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::r) {
                        "r"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::w) {
                        "w"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::x) {
                        "x"
                    } else {
                        "-"
                    },
                )?;
            }

            Ok(())
        }
    }
}