// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

use core::cell::Cell;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::{cmp, fmt};

use kernel::platform::mpu;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::registers::{register_bitfields, LocalRegisterCopy};

use crate::csr;

register_bitfields![u8,
    /// Generic `pmpcfg` octet.
    ///
    /// A PMP entry is configured through `pmpaddrX` and `pmpcfgX` CSRs, where a
    /// single `pmpcfgX` CSR holds multiple octets, each affecting the access
    /// permission, addressing mode and "lock" attributes of a single `pmpaddrX`
    /// CSR. This bitfield definition represents a single, `u8`-backed `pmpcfg`
    /// octet affecting a single `pmpaddr` entry.
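    ///
    /// For example (an illustrative value, not read from hardware): a locked,
    /// NAPOT-addressed, read-execute octet corresponds to
    /// `l::SET + a::NAPOT + x::SET + r::SET`, i.e. the byte `0b1001_1101`.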
    pub pmpcfg_octet [
        r OFFSET(0) NUMBITS(1) [],
        w OFFSET(1) NUMBITS(1) [],
        x OFFSET(2) NUMBITS(1) [],
        a OFFSET(3) NUMBITS(2) [
            OFF = 0,
            TOR = 1,
            NA4 = 2,
            NAPOT = 3
        ],
        l OFFSET(7) NUMBITS(1) []
    ]
];

/// A `pmpcfg` octet for a user-mode (non-locked) TOR-addressed PMP region.
///
/// This is a wrapper around a [`pmpcfg_octet`] (`u8`) register type, which
/// guarantees that the wrapped `pmpcfg` octet is always set to be either
/// [`TORUserPMPCFG::OFF`] (set to `0x00`), or in a non-locked, TOR-addressed
/// configuration.
///
/// By accepting this type, PMP implementations can rely on the above
/// properties to hold by construction and avoid runtime checks. For example,
/// this type is used in the [`TORUserPMP::configure_pmp`] method.
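///
/// # Example
///
/// A minimal sketch of constructing a `TORUserPMPCFG` from the kernel's
/// [`mpu::Permissions`] type:
///
/// ```rust,ignore
/// let cfg = TORUserPMPCFG::from(mpu::Permissions::ReadOnly);
/// // R bit set, A field set to TOR (0b01), L bit clear:
/// assert_eq!(cfg.get(), 0b0000_1001);
/// ```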
#[derive(Copy, Clone, Debug)]
pub struct TORUserPMPCFG(LocalRegisterCopy<u8, pmpcfg_octet::Register>);

impl TORUserPMPCFG {
    pub const OFF: TORUserPMPCFG = TORUserPMPCFG(LocalRegisterCopy::new(0));

    /// Extract the `u8` representation of the [`pmpcfg_octet`] register.
    pub fn get(&self) -> u8 {
        self.0.get()
    }

    /// Extract a copy of the contained [`pmpcfg_octet`] register.
    pub fn get_reg(&self) -> LocalRegisterCopy<u8, pmpcfg_octet::Register> {
        self.0
    }
}

impl PartialEq<TORUserPMPCFG> for TORUserPMPCFG {
    fn eq(&self, other: &Self) -> bool {
        self.0.get() == other.0.get()
    }
}

impl Eq for TORUserPMPCFG {}

impl From<mpu::Permissions> for TORUserPMPCFG {
    fn from(p: mpu::Permissions) -> Self {
        let fv = match p {
            mpu::Permissions::ReadWriteExecute => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::SET
            }
            mpu::Permissions::ReadWriteOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::SET + pmpcfg_octet::x::CLEAR
            }
            mpu::Permissions::ReadExecuteOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
            }
            mpu::Permissions::ReadOnly => {
                pmpcfg_octet::r::SET + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::CLEAR
            }
            mpu::Permissions::ExecuteOnly => {
                pmpcfg_octet::r::CLEAR + pmpcfg_octet::w::CLEAR + pmpcfg_octet::x::SET
            }
        };

        TORUserPMPCFG(LocalRegisterCopy::new(
            (fv + pmpcfg_octet::l::CLEAR + pmpcfg_octet::a::TOR).value,
        ))
    }
}

/// A RISC-V PMP memory region specification, configured in NAPOT mode.
///
/// This type checks that the supplied `start` and `size` values meet the RISC-V
/// NAPOT requirements, namely that
///
/// - the region is a power of two bytes in size
/// - the region's start address is aligned to the region size
/// - the region is at least 8 bytes long
///
/// By accepting this type, PMP implementations can rely on these requirements
/// having been verified. Furthermore, they can use the
/// [`NAPOTRegionSpec::napot_addr`] convenience method to retrieve a `pmpaddrX`
/// CSR value encoding this region's address and length.
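///
/// # Example
///
/// A minimal sketch, assuming a hypothetical 4 KiB device region at
/// `0x4000_0000` (all addresses are illustrative):
///
/// ```rust,ignore
/// let region = NAPOTRegionSpec::new(0x4000_0000 as *const u8, 4096)
///     .expect("aligned power-of-two region must be accepted");
/// // As `start` is aligned to `size`, the NAPOT encoding reduces to
/// // (start >> 2) | ((size - 1) >> 3):
/// assert_eq!(region.napot_addr(), (0x4000_0000 >> 2) | ((4096 - 1) >> 3));
/// ```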
#[derive(Copy, Clone, Debug)]
pub struct NAPOTRegionSpec {
    start: *const u8,
    size: usize,
}

impl NAPOTRegionSpec {
    /// Construct a new [`NAPOTRegionSpec`]
    ///
    /// This method accepts a `start` address and a region length. It returns
    /// `Some(region)` when all constraints specified in the
    /// [`NAPOTRegionSpec`]'s documentation are satisfied, otherwise `None`.
    pub fn new(start: *const u8, size: usize) -> Option<Self> {
        if !size.is_power_of_two() || (start as usize) % size != 0 || size < 8 {
            None
        } else {
            Some(NAPOTRegionSpec { start, size })
        }
    }

    /// Retrieve the start address of this [`NAPOTRegionSpec`].
    pub fn start(&self) -> *const u8 {
        self.start
    }

    /// Retrieve the size of this [`NAPOTRegionSpec`].
    pub fn size(&self) -> usize {
        self.size
    }

    /// Retrieve the end address of this [`NAPOTRegionSpec`].
    pub fn end(&self) -> *const u8 {
        unsafe { self.start.add(self.size) }
    }

    /// Retrieve a `pmpaddrX`-CSR compatible representation of this
    /// [`NAPOTRegionSpec`]'s address and length. For this value to be valid in
    /// a `pmpaddrX` CSR, the corresponding `pmpcfgX` octet's `A` (address
    /// mode) field must be set to `NAPOT` (0b11).
    pub fn napot_addr(&self) -> usize {
        ((self.start as usize) + (self.size - 1).overflowing_shr(1).0)
            .overflowing_shr(2)
            .0
    }
}

/// A RISC-V PMP memory region specification, configured in TOR mode.
///
/// This type checks that the supplied `start` and `end` addresses meet the
/// RISC-V TOR requirements, namely that
///
/// - the region's start address is aligned to a 4-byte boundary
/// - the region's end address is aligned to a 4-byte boundary
/// - the region is at least 4 bytes long
///
/// By accepting this type, PMP implementations can rely on these requirements
/// having been verified.
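///
/// # Example
///
/// A minimal sketch with hypothetical, 4-byte aligned addresses:
///
/// ```rust,ignore
/// let region = TORRegionSpec::new(0x8010_0000 as *const u8, 0x8010_1000 as *const u8)
///     .expect("well-aligned, non-empty TOR region must be accepted");
/// // A misaligned start address is rejected:
/// assert!(TORRegionSpec::new(0x8010_0001 as *const u8, 0x8010_1000 as *const u8).is_none());
/// ```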
#[derive(Copy, Clone, Debug)]
pub struct TORRegionSpec {
    start: *const u8,
    end: *const u8,
}

impl TORRegionSpec {
    /// Construct a new [`TORRegionSpec`]
    ///
    /// This method accepts a `start` and `end` address. It returns
    /// `Some(region)` when all constraints specified in the [`TORRegionSpec`]'s
    /// documentation are satisfied, otherwise `None`.
    pub fn new(start: *const u8, end: *const u8) -> Option<Self> {
        if (start as usize) % 4 != 0
            || (end as usize) % 4 != 0
            || (end as usize)
                .checked_sub(start as usize)
                .is_none_or(|size| size < 4)
        {
            None
        } else {
            Some(TORRegionSpec { start, end })
        }
    }

    /// Retrieve the start address of this [`TORRegionSpec`].
    pub fn start(&self) -> *const u8 {
        self.start
    }

    /// Retrieve the end address of this [`TORRegionSpec`].
    pub fn end(&self) -> *const u8 {
        self.end
    }
}

/// Helper method to check if a [`PMPUserMPUConfig`] region overlaps with a
/// region specified by `other_start` and `other_size`.
///
/// Matching the RISC-V spec, this checks `pmpaddr[i-1] <= y < pmpaddr[i]` for
/// TOR ranges.
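///
/// # Example
///
/// A sketch of the half-open overlap semantics (hypothetical addresses):
///
/// ```rust,ignore
/// // An enabled TOR region covering [0x1000, 0x2000):
/// let region = (
///     TORUserPMPCFG::from(mpu::Permissions::ReadOnly),
///     0x1000 as *const u8,
///     0x2000 as *const u8,
/// );
/// assert!(region_overlaps(&region, 0x1FFC as *const u8, 8)); // straddles the end
/// assert!(!region_overlaps(&region, 0x2000 as *const u8, 8)); // end is exclusive
/// ```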
fn region_overlaps(
    region: &(TORUserPMPCFG, *const u8, *const u8),
    other_start: *const u8,
    other_size: usize,
) -> bool {
    // PMP TOR regions are not inclusive on the high end, that is
    //     pmpaddr[i-1] <= y < pmpaddr[i].
    //
    // This happens to coincide with the definition of the Rust half-open Range
    // type, which provides a convenient `.contains()` method:
    let region_range = Range {
        start: region.1 as usize,
        end: region.2 as usize,
    };

    let other_range = Range {
        start: other_start as usize,
        end: other_start as usize + other_size,
    };

    // For a range A to overlap with a range B, either B's first or B's last
    // element must be contained in A, or A's first or A's last element must be
    // contained in B. As we deal with half-open ranges, ensure that neither
    // range is empty.
    //
    // This implementation is simple and stupid, and can be optimized. We leave
    // that as an exercise to the compiler.
    !region_range.is_empty()
        && !other_range.is_empty()
        && (region_range.contains(&other_range.start)
            || region_range.contains(&(other_range.end - 1))
            || other_range.contains(&region_range.start)
            || other_range.contains(&(region_range.end - 1)))
}

/// Print a table of the configured PMP regions, read from the HW CSRs.
///
/// # Safety
///
/// This function is unsafe, as it relies on the PMP CSRs to be accessible, and
/// the hardware to feature `PHYSICAL_ENTRIES` PMP CSR entries. If these
/// conditions are not met, calling this function can result in undefined
/// behavior (e.g., cause a system trap).
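///
/// # Example
///
/// A sketch of wiring this into a `fmt::Display` implementation (mirroring
/// what [`simple::SimplePMP`] does below; `MyPMP` and the entry count of 16
/// are hypothetical):
///
/// ```rust,ignore
/// impl fmt::Display for MyPMP {
///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
///         write!(f, " PMP hardware configuration -- entries: \r\n")?;
///         unsafe { format_pmp_entries::<16>(f) }
///     }
/// }
/// ```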
pub unsafe fn format_pmp_entries<const PHYSICAL_ENTRIES: usize>(
    f: &mut fmt::Formatter<'_>,
) -> fmt::Result {
    for i in 0..PHYSICAL_ENTRIES {
        // Extract the entry's pmpcfgX register value. The pmpcfgX CSRs are
        // tightly packed and contain 4 octets belonging to individual
        // entries. Convert this into a u8-wide LocalRegisterCopy<u8,
        // pmpcfg_octet> as a generic register type, independent of the entry's
        // offset.
        let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
            csr::CSR
                .pmpconfig_get(i / 4)
                .overflowing_shr(((i % 4) * 8) as u32)
                .0 as u8,
        );

        // The address interpretation is different for every mode. Return both a
        // string indicating the PMP entry's mode, as well as the effective
        // start and end address (inclusive) affected by the region. For regions
        // that are OFF, we still want to expose the pmpaddrX register value --
        // thus return the raw unshifted value as the addr, and 0 as the
        // region's end.
        let (start_label, start, end, mode) = match pmpcfg.read_as_enum(pmpcfg_octet::a) {
            Some(pmpcfg_octet::a::Value::OFF) => {
                let addr = csr::CSR.pmpaddr_get(i);
                ("pmpaddr", addr, 0, "OFF  ")
            }

            Some(pmpcfg_octet::a::Value::TOR) => {
                let start = if i > 0 {
                    csr::CSR.pmpaddr_get(i - 1)
                } else {
                    0
                };

                (
                    "  start",
                    start.overflowing_shl(2).0,
                    csr::CSR.pmpaddr_get(i).overflowing_shl(2).0.wrapping_sub(1),
                    "TOR  ",
                )
            }

            Some(pmpcfg_octet::a::Value::NA4) => {
                let addr = csr::CSR.pmpaddr_get(i).overflowing_shl(2).0;
                ("  start", addr, addr | 0b11, "NA4  ")
            }

            Some(pmpcfg_octet::a::Value::NAPOT) => {
                let pmpaddr = csr::CSR.pmpaddr_get(i);
                let encoded_size = pmpaddr.trailing_ones();
                if (encoded_size as usize) < (core::mem::size_of_val(&pmpaddr) * 8 - 1) {
                    let start = pmpaddr - ((1 << encoded_size) - 1);
                    let end = start + (1 << (encoded_size + 1)) - 1;
                    (
                        "  start",
                        start.overflowing_shl(2).0,
                        end.overflowing_shl(2).0 | 0b11,
                        "NAPOT",
                    )
                } else {
                    ("  start", usize::MIN, usize::MAX, "NAPOT")
                }
            }

            None => {
                // We match on a 2-bit value with 4 variants, so this is
                // unreachable. However, don't insert a panic in case this
                // doesn't get optimized away:
                ("", 0, 0, "")
            }
        };

        // Ternary operator shortcut function, to avoid bulky formatting...
        fn t<T>(cond: bool, a: T, b: T) -> T {
            if cond {
                a
            } else {
                b
            }
        }

        write!(
            f,
            "  [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}) ({}{}{}{})\r\n",
            i,
            start_label,
            start,
            end,
            pmpcfg.get(),
            mode,
            t(pmpcfg.is_set(pmpcfg_octet::l), "l", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
            t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
        )?;
    }

    Ok(())
}

/// A RISC-V PMP implementation exposing a number of TOR memory protection
/// regions to the [`PMPUserMPU`].
///
/// The RISC-V PMP is complex and can be used to enforce memory protection in
/// various modes (Machine, Supervisor and User mode). Depending on the exact
/// extension set present (e.g., ePMP) and the machine's security configuration
/// bits, it may expose a vastly different set of constraints and application
/// semantics.
///
/// Because we can't possibly capture all of this in a single readable,
/// maintainable and efficient implementation, we implement a two-layer system:
///
/// - a [`TORUserPMP`] is a simple abstraction over some underlying PMP hardware
///   implementation, which exposes an interface to configure regions that are
///   active (enforced) in user-mode and can be configured for arbitrary
///   addresses on a 4-byte granularity.
///
/// - the [`PMPUserMPU`] takes this abstraction and implements the Tock kernel's
///   [`mpu::MPU`] trait. It worries about re-configuring memory protection when
///   switching processes, allocating memory regions of an appropriate size,
///   etc.
///
/// Implementors of a chip are free to define their own [`TORUserPMP`]
/// implementations, adhering to their specific PMP layout & constraints,
/// provided they implement this trait.
///
/// The `MAX_REGIONS` const generic is used to indicate the maximum number of
/// TOR PMP regions available to the [`PMPUserMPU`]. The PMP implementation may
/// provide fewer regions than indicated through `MAX_REGIONS`, for instance
/// when entries are enforced (locked) in machine mode. The number of available
/// regions may change at runtime. The current number of regions available to
/// the [`PMPUserMPU`] is indicated by the [`TORUserPMP::available_regions`]
/// method. However, when it is known that a number of regions are not available
/// for userspace protection, `MAX_REGIONS` can be used to reduce the memory
/// footprint allocated by stored PMP configurations, as well as the
/// re-configuration overhead.
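///
/// # Example
///
/// A minimal (non-functional) implementor sketch, similar to the
/// `MockTORUserPMP` used in this module's tests (`NoopPMP` is a hypothetical
/// name):
///
/// ```rust,ignore
/// struct NoopPMP;
///
/// impl<const MAX_REGIONS: usize> TORUserPMP<MAX_REGIONS> for NoopPMP {
///     const CONST_ASSERT_CHECK: () = ();
///     fn available_regions(&self) -> usize {
///         MAX_REGIONS
///     }
///     fn configure_pmp(
///         &self,
///         _regions: &[(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
///     ) -> Result<(), ()> {
///         Ok(())
///     }
///     fn enable_user_pmp(&self) -> Result<(), ()> {
///         Ok(())
///     }
///     fn disable_user_pmp(&self) {}
/// }
/// ```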
pub trait TORUserPMP<const MAX_REGIONS: usize> {
    /// A placeholder to define const-assertions which are evaluated in
    /// [`PMPUserMPU::new`]. This can be used to, for instance, assert that the
    /// number of userspace regions does not exceed the number of hardware
    /// regions.
    const CONST_ASSERT_CHECK: ();

    /// The number of TOR regions currently available for userspace memory
    /// protection. Within `[0; MAX_REGIONS]`.
    ///
    /// The PMP implementation may provide fewer regions than indicated through
    /// `MAX_REGIONS`, for instance when entries are enforced (locked) in
    /// machine mode. The number of available regions may change at runtime. The
    /// implementation is free to map these regions to arbitrary PMP entries
    /// (and change this mapping at runtime), provided that they are enforced
    /// when the hart is in user-mode, and other memory regions are generally
    /// inaccessible when in user-mode.
    ///
    /// When allocating regions for kernel-mode protection, and thus reducing
    /// the number of regions available to userspace, re-configuring the PMP may
    /// fail. This is allowed behavior. However, the PMP must not remove any
    /// regions from the user-mode current configuration while it is active
    /// ([`TORUserPMP::enable_user_pmp`] has been called, and it has not been
    /// disabled through [`TORUserPMP::disable_user_pmp`]).
    fn available_regions(&self) -> usize;

    /// Configure the user-mode memory protection.
    ///
    /// This method configures the user-mode memory protection, to be enforced
    /// on a call to [`TORUserPMP::enable_user_pmp`].
    ///
    /// PMP implementations where configured regions are only enforced in
    /// user-mode may re-configure the PMP on this function invocation and
    /// implement [`TORUserPMP::enable_user_pmp`] as a no-op. If configured
    /// regions are enforced in machine-mode (for instance when using an ePMP
    /// with the machine-mode whitelist policy), the new configuration rules
    /// must not apply until [`TORUserPMP::enable_user_pmp`].
    ///
    /// The tuples as passed in the `regions` parameter are defined as follows:
    ///
    /// - first value ([`TORUserPMPCFG`]): the memory protection mode as
    ///   enforced on the region. A `TORUserPMPCFG` can be created from the
    ///   [`mpu::Permissions`] type. It is in a format compatible with the
    ///   pmpcfgX register, guaranteed to not have the lock (`L`) bit set, and
    ///   configured either as a TOR region (`A = 0b01`), or disabled (all bits
    ///   set to `0`).
    ///
    /// - second value (`*const u8`): the region's start address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// - third value (`*const u8`): the region's end address. As a PMP TOR
    ///   region has a 4-byte address granularity, this address is rounded down
    ///   to the next 4-byte boundary.
    ///
    /// To disable a region, set its configuration to [`TORUserPMPCFG::OFF`]. In
    /// this case, the start and end addresses are ignored and can be set to
    /// arbitrary values.
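    ///
    /// # Example
    ///
    /// A sketch of a `regions` argument with one enabled and one disabled
    /// region (addresses are illustrative):
    ///
    /// ```rust,ignore
    /// let regions: [(TORUserPMPCFG, *const u8, *const u8); 2] = [
    ///     // An enabled read/write TOR region:
    ///     (
    ///         mpu::Permissions::ReadWriteOnly.into(),
    ///         0x8000_0000 as *const u8,
    ///         0x8000_1000 as *const u8,
    ///     ),
    ///     // A disabled region; its addresses are ignored:
    ///     (TORUserPMPCFG::OFF, core::ptr::null(), core::ptr::null()),
    /// ];
    /// pmp.configure_pmp(&regions)?;
    /// ```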
    fn configure_pmp(
        &self,
        regions: &[(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    ) -> Result<(), ()>;

    /// Enable the user-mode memory protection.
    ///
    /// Enables the memory protection for user-mode, as configured through
    /// [`TORUserPMP::configure_pmp`]. Enabling the PMP for user-mode may make
    /// the user-mode accessible regions inaccessible to the kernel. For PMP
    /// implementations where configured regions are only enforced in user-mode,
    /// this method may be implemented as a no-op.
    ///
    /// If enabling the current configuration is not possible (e.g., because
    /// regions have been allocated to the kernel), this function must return
    /// `Err(())`. Otherwise, this function returns `Ok(())`.
    fn enable_user_pmp(&self) -> Result<(), ()>;

    /// Disable the user-mode memory protection.
    ///
    /// Disables the memory protection for user-mode. If enabling the user-mode
    /// memory protection made user-mode accessible regions inaccessible to
    /// machine-mode, this method should make these regions accessible again.
    ///
    /// For PMP implementations where configured regions are only enforced in
    /// user-mode, this method may be implemented as a no-op. This method is not
    /// responsible for making regions inaccessible to user-mode. If previously
    /// configured regions must be made inaccessible,
    /// [`TORUserPMP::configure_pmp`] must be used to re-configure the PMP
    /// accordingly.
    fn disable_user_pmp(&self);
}

/// Struct storing userspace memory protection regions for the [`PMPUserMPU`].
pub struct PMPUserMPUConfig<const MAX_REGIONS: usize> {
    /// PMP config identifier, as generated by the issuing PMP implementation.
    id: NonZeroUsize,
    /// Indicates if the configuration has changed since the last time it was
    /// written to hardware.
    is_dirty: Cell<bool>,
    /// Array of MPU regions. Each region requires two physical PMP entries.
    regions: [(TORUserPMPCFG, *const u8, *const u8); MAX_REGIONS],
    /// Which region index (into the `regions` array above) is used
    /// for app memory (if it has been configured).
    app_memory_region: OptionalCell<usize>,
}

impl<const MAX_REGIONS: usize> fmt::Display for PMPUserMPUConfig<MAX_REGIONS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Ternary operator shortcut function, to avoid bulky formatting...
        fn t<T>(cond: bool, a: T, b: T) -> T {
            if cond {
                a
            } else {
                b
            }
        }

        write!(
            f,
            " PMPUserMPUConfig {{\r\n  id: {},\r\n  is_dirty: {},\r\n  app_memory_region: {:?},\r\n  regions:\r\n",
            self.id,
            self.is_dirty.get(),
            self.app_memory_region.get()
        )?;

        for (i, (tor_user_pmpcfg, start, end)) in self.regions.iter().enumerate() {
            let pmpcfg = tor_user_pmpcfg.get_reg();
            write!(
                f,
                "     #{:02}: start={:#010X}, end={:#010X}, cfg={:#04X} ({}) (-{}{}{})\r\n",
                i,
                *start as usize,
                *end as usize,
                pmpcfg.get(),
                t(pmpcfg.is_set(pmpcfg_octet::a), "TOR", "OFF"),
                t(pmpcfg.is_set(pmpcfg_octet::r), "r", "-"),
                t(pmpcfg.is_set(pmpcfg_octet::w), "w", "-"),
                t(pmpcfg.is_set(pmpcfg_octet::x), "x", "-"),
            )?;
        }

        write!(f, " }}\r\n")?;
        Ok(())
    }
}

/// Adapter from a generic PMP implementation exposing TOR-type regions to the
/// Tock [`mpu::MPU`] trait. See [`TORUserPMP`].
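///
/// # Example
///
/// A sketch of instantiating this adapter over the [`simple::SimplePMP`]
/// implementation defined below (the 16 hardware entries and 4 MPU regions
/// are illustrative values):
///
/// ```rust,ignore
/// let pmp = unsafe { simple::SimplePMP::<16>::new().unwrap() };
/// let mpu: PMPUserMPU<4, _> = PMPUserMPU::new(pmp);
/// ```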
pub struct PMPUserMPU<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> {
    /// Monotonically increasing counter for allocated configurations, used to
    /// assign unique IDs to `PMPUserMPUConfig` instances.
    config_count: Cell<NonZeroUsize>,
    /// The configuration that the PMP was last configured for. Used (along with
    /// the `is_dirty` flag) to determine if PMP can skip writing the
    /// configuration to hardware.
    last_configured_for: OptionalCell<NonZeroUsize>,
    /// Underlying hardware PMP implementation, exposing a number (up to
    /// `P::MAX_REGIONS`) of memory protection regions with a 4-byte enforcement
    /// granularity.
    pub pmp: P,
}

impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> PMPUserMPU<MAX_REGIONS, P> {
    pub fn new(pmp: P) -> Self {
        // Assigning this constant here ensures evaluation of the const
        // expression at compile time, and can thus be used to enforce
        // compile-time assertions based on the desired PMP configuration.
        #[allow(clippy::let_unit_value)]
        let _: () = P::CONST_ASSERT_CHECK;

        PMPUserMPU {
            config_count: Cell::new(NonZeroUsize::MIN),
            last_configured_for: OptionalCell::empty(),
            pmp,
        }
    }
}

impl<const MAX_REGIONS: usize, P: TORUserPMP<MAX_REGIONS> + 'static> kernel::platform::mpu::MPU
    for PMPUserMPU<MAX_REGIONS, P>
{
    type MpuConfig = PMPUserMPUConfig<MAX_REGIONS>;

    fn enable_app_mpu(&self) {
        // TODO: This operation may fail when the PMP is not exclusively used
        // for userspace. Instead of panicking, we should handle this case more
        // gracefully and return an error in the `MPU` trait. Process
        // infrastructure can then attempt to re-schedule the process later on,
        // try to revoke some optional shared memory regions, or suspend the
        // process.
        self.pmp.enable_user_pmp().unwrap()
    }

    fn disable_app_mpu(&self) {
        self.pmp.disable_user_pmp()
    }

    fn number_total_regions(&self) -> usize {
        self.pmp.available_regions()
    }

    fn new_config(&self) -> Option<Self::MpuConfig> {
        let id = self.config_count.get();
        self.config_count.set(id.checked_add(1)?);

        Some(PMPUserMPUConfig {
            id,
            regions: [(
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            ); MAX_REGIONS],
            is_dirty: Cell::new(true),
            app_memory_region: OptionalCell::empty(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        config.regions.iter_mut().for_each(|region| {
            *region = (
                TORUserPMPCFG::OFF,
                core::ptr::null::<u8>(),
                core::ptr::null::<u8>(),
            )
        });
        config.app_memory_region.clear();
        config.is_dirty.set(true);
    }

    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<mpu::Region> {
        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints. For this, start with the
        // provided start address and size, transform them to meet the
        // constraints, and then check that we're still within the bounds of the
        // provided values:
        let mut start = unallocated_memory_start as usize;
        let mut size = min_region_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if size % 4 != 0 {
            size += 4 - (size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if size < 4 {
            size = 4;
        }

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        //     start + size <= unallocated_memory_start + unallocated_memory_size
        if start + size > (unallocated_memory_start as usize) + unallocated_memory_size {
            // We're overflowing the provided memory region, can't make
            // allocation. Normally, we'd abort here.
            //
            // However, a previous implementation of this code was incorrect in
            // that it performed this check before adjusting the requested
            // region size to meet PMP region layout constraints (4 byte
            // alignment for start and end address). Existing applications whose
            // end-address is aligned on a less than 4-byte boundary would thus
            // be given access to additional memory which should be
            // inaccessible. Unfortunately, we can't fix this without breaking
            // existing applications. Thus, we perform the same insecure hack
            // here, and give the apps at most an extra 3 bytes of memory, as
            // long as the requested region has no write privileges.
            //
            // TODO: Remove this logic as part of
            // https://github.com/tock/tock/issues/3544
            let writeable = match permissions {
                mpu::Permissions::ReadWriteExecute => true,
                mpu::Permissions::ReadWriteOnly => true,
                mpu::Permissions::ReadExecuteOnly => false,
                mpu::Permissions::ReadOnly => false,
                mpu::Permissions::ExecuteOnly => false,
            };

            if writeable
                || (start + size
                    > (unallocated_memory_start as usize) + unallocated_memory_size + 3)
            {
                return None;
            }
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF && region_overlaps(region, start as *const u8, size) {
                return None;
            }
        }

        // All checks passed, store region allocation and mark config as dirty:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + size) as *const u8,
        );
        config.is_dirty.set(true);

        Some(mpu::Region::new(start as *const u8, size))
    }

    fn remove_memory_region(
        &self,
        region: mpu::Region,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let index = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, r)| {
                // `start as usize + size` in lieu of a safe pointer offset method
                r.0 != TORUserPMPCFG::OFF
                    && r.1 == region.start_address()
                    && r.2 == (region.start_address() as usize + region.size()) as *const u8
            })
            .map(|(i, _)| i)
            .ok_or(())?;

        config.regions[index].0 = TORUserPMPCFG::OFF;
        config.is_dirty.set(true);

        Ok(())
    }

    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        // An app memory region can only be allocated once per `MpuConfig`.
        // If we already have one, abort:
        if config.app_memory_region.is_some() {
            return None;
        }

        // Find a free region slot. If we don't have one, abort early:
        let region_num = config
            .regions
            .iter()
            .enumerate()
            .find(|(_i, (pmpcfg, _, _))| *pmpcfg == TORUserPMPCFG::OFF)
            .map(|(i, _)| i)?;

        // Now, meet the PMP TOR region constraints for the region specified by
        // `initial_app_memory_size` (which is the part of the region actually
        // protected by the PMP). For this, start with the provided start
        // address and size, transform them to meet the constraints, and then
        // check that we're still within the bounds of the provided values:
        let mut start = unallocated_memory_start as usize;
        let mut pmp_region_size = initial_app_memory_size;

        // Region start always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if start % 4 != 0 {
            start += 4 - (start % 4);
        }

        // Region size always has to align to 4 bytes. Round up to a 4 byte
        // boundary if required:
        if pmp_region_size % 4 != 0 {
            pmp_region_size += 4 - (pmp_region_size % 4);
        }

        // Regions must be at least 4 bytes in size.
        if pmp_region_size < 4 {
            pmp_region_size = 4;
        }

        // We need to provide a memory block that fits both the initial app and
        // kernel memory sections, and is `min_memory_size` bytes
        // long. Calculate the length of this block with our new PMP-aligned
        // size:
        let memory_block_size = cmp::max(
            min_memory_size,
            pmp_region_size + initial_kernel_memory_size,
        );

        // Now, check to see whether the adjusted start and size still meet the
        // allocation constraints, namely ensure that
        //
        //     start + memory_block_size
        //         <= unallocated_memory_start + unallocated_memory_size
        //
        // which ensures the PMP constraints didn't push us over the bounds of
        // the provided memory region, and we can fit the entire allocation as
        // requested by the kernel:
        if start + memory_block_size > (unallocated_memory_start as usize) + unallocated_memory_size
        {
            // Overflowing the provided memory region, can't make allocation:
            return None;
        }

        // Finally, check that this new region does not overlap with any
        // existing configured userspace region:
        for region in config.regions.iter() {
            if region.0 != TORUserPMPCFG::OFF
                && region_overlaps(region, start as *const u8, memory_block_size)
            {
                return None;
            }
        }

        // All checks passed, store region allocation, indicate the
        // app_memory_region, and mark config as dirty:
        config.regions[region_num] = (
            permissions.into(),
            start as *const u8,
            (start + pmp_region_size) as *const u8,
        );
        config.is_dirty.set(true);
        config.app_memory_region.replace(region_num);

        Some((start as *const u8, memory_block_size))
    }

    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let region_num = config.app_memory_region.get().ok_or(())?;

        let mut app_memory_break = app_memory_break as usize;
        let kernel_memory_break = kernel_memory_break as usize;

        // Ensure that the requested app_memory_break complies with PMP
        // alignment constraints, namely that the region's end address is 4 byte
        // aligned:
        if app_memory_break % 4 != 0 {
            app_memory_break += 4 - (app_memory_break % 4);
        }

        // Check if the app has run out of memory:
        if app_memory_break > kernel_memory_break {
            return Err(());
        }

        // If we're not out of memory, update the region configuration
        // accordingly:
        config.regions[region_num].0 = permissions.into();
        config.regions[region_num].2 = app_memory_break as *const u8;
        config.is_dirty.set(true);

        Ok(())
    }

    fn configure_mpu(&self, config: &Self::MpuConfig) {
        if !self.last_configured_for.contains(&config.id) || config.is_dirty.get() {
            self.pmp.configure_pmp(&config.regions).unwrap();
            config.is_dirty.set(false);
            self.last_configured_for.set(config.id);
        }
    }
}

#[cfg(test)]
pub mod test {
    use super::{TORUserPMP, TORUserPMPCFG};

    struct MockTORUserPMP;
    impl<const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS> for MockTORUserPMP {
        // Don't require any const-assertions in the MockTORUserPMP.
        const CONST_ASSERT_CHECK: () = ();

        fn available_regions(&self) -> usize {
            // For the MockTORUserPMP, we always assume that the full number of
            // MPU_REGIONS is available. More advanced tests may want to return
            // a different number here (to simulate kernel memory protection)
            // and make the configuration fail at runtime, for instance.
            MPU_REGIONS
        }

        fn configure_pmp(
            &self,
            _regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            Ok(())
        }

        fn disable_user_pmp(&self) {}
    }

    // TODO: implement more test cases, such as:
    //
    // - Try to update the app memory break with an invalid pointer below its
    //   allocation's start address.

    #[test]
    fn test_mpu_region_no_overlap() {
        use crate::pmp::PMPUserMPU;
        use kernel::platform::mpu::{Permissions, MPU};

        let mpu: PMPUserMPU<8, MockTORUserPMP> = PMPUserMPU::new(MockTORUserPMP);
        let mut config = mpu
            .new_config()
            .expect("Failed to allocate the first MPU config");

        // Allocate a region which spans from 0x40000000 to 0x80000000 (this
        // meets PMP alignment constraints and will work on 32-bit and 64-bit
        // systems)
        let region_0 = mpu
            .allocate_region(
                0x40000000 as *const u8,
                0x40000000,
                0x40000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/W MPU region with \
                 unallocated_memory_size == min_region_size",
            );
        assert!(region_0.start_address() == 0x40000000 as *const u8);
        assert!(region_0.size() == 0x40000000);

        // Try to allocate a region adjacent to `region_0`. This should work:
        let region_1 = mpu
            .allocate_region(
                0x80000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadExecuteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/X MPU region adjacent to \
                 another region",
            );
        assert!(region_1.start_address() == 0x80000000 as *const u8);
        assert!(region_1.size() == 0x10000000);

        // Remove the previously allocated `region_1`:
        mpu.remove_memory_region(region_1, &mut config)
            .expect("Failed to remove valid MPU region allocation");

        // Allocate another region which spans from 0xc0000000 to 0xe0000000
        // (this meets PMP alignment constraints and will work on 32-bit and
        // 64-bit systems), but this time allocate it using the
        // `allocate_app_memory_region` method. We want a region of `0x20000000`
        // bytes, but only the first `0x10000000` should be accessible to the
        // app.
        let (region_2_start, region_2_size) = mpu
            .allocate_app_memory_region(
                0xc0000000 as *const u8,
                0x20000000,
                0x20000000,
                0x10000000,
                0x08000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .expect(
                "Failed to allocate a well-aligned R/W app memory MPU region \
                 with unallocated_memory_size == min_region_size",
            );
        assert!(region_2_start == 0xc0000000 as *const u8);
        assert!(region_2_size == 0x20000000);

        // --> General overlap tests involving both regions

        // Now, try to allocate another region that spans over both memory
        // regions. This should fail.
        assert!(mpu
            .allocate_region(
                0x40000000 as *const u8,
                0xc0000000,
                0xc0000000,
                Permissions::ReadOnly,
                &mut config,
            )
            .is_none());

        // Try to allocate a region that spans over parts of both memory
        // regions. This should fail.
        assert!(mpu
            .allocate_region(
                0x48000000 as *const u8,
                0x80000000,
                0x80000000,
                Permissions::ReadOnly,
                &mut config,
            )
            .is_none());

        // --> Overlap tests involving a single region (region_0)
        //
        // We define these in an array, such that we can run the tests with the
        // `region_0` defined (to confirm that the allocations are indeed
        // refused), and with `region_0` removed (to make sure they would work
        // in general).
        let overlap_region_0_tests = [
            (
                // Try to allocate a region that is contained within
                // `region_0`. This should fail.
                0x41000000 as *const u8,
                0x01000000,
                0x01000000,
                Permissions::ReadWriteOnly,
            ),
            (
                // Try to allocate a region that overlaps with `region_0` in the
                // front. This should fail.
                0x38000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteExecute,
            ),
            (
                // Try to allocate a region that overlaps with `region_0` in the
                // back. This should fail.
                0x48000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ExecuteOnly,
            ),
            (
                // Try to allocate a region that spans over `region_0`. This
                // should fail.
                0x38000000 as *const u8,
                0x20000000,
                0x20000000,
                Permissions::ReadWriteOnly,
            ),
        ];

        // Make sure that the allocation requests fail with `region_0` defined:
        for (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
            assert!(mpu
                .allocate_region(*memory_start, *memory_size, *length, *perms, &mut config,)
                .is_none());
        }

        // Now, remove `region_0` and re-run the tests. Every test-case should
        // succeed now (in isolation, hence removing the successful allocations):
        mpu.remove_memory_region(region_0, &mut config)
            .expect("Failed to remove valid MPU region allocation");

        for region @ (memory_start, memory_size, length, perms) in overlap_region_0_tests.iter() {
            let allocation_res =
                mpu.allocate_region(*memory_start, *memory_size, *length, *perms, &mut config);

            match allocation_res {
                Some(region) => {
                    mpu.remove_memory_region(region, &mut config)
                        .expect("Failed to remove valid MPU region allocation");
                }
                None => {
                    panic!(
                        "Failed to allocate region that does not overlap and should meet alignment constraints: {:?}",
                        region
                    );
                }
            }
        }

        // Make sure we can technically allocate a memory region that overlaps
        // with the kernel part of the `app_memory_region`.
        //
        // It is unclear whether this should be supported.
        let region_2 = mpu
            .allocate_region(
                0xd0000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .unwrap();
        assert!(region_2.start_address() == 0xd0000000 as *const u8);
        assert!(region_2.size() == 0x10000000);

        // Now, we can grow the app memory break into this region:
        mpu.update_app_memory_region(
            0xd0000004 as *const u8,
            0xd8000000 as *const u8,
            Permissions::ReadWriteOnly,
            &mut config,
        )
        .expect("Failed to grow the app memory region into an existing other MPU region");

        // Now, we have two overlapping MPU regions. Remove `region_2`, and try
        // to reallocate it as `region_3`. This should fail now, demonstrating
        // that we managed to reach an invalid intermediate state:
        mpu.remove_memory_region(region_2, &mut config)
            .expect("Failed to remove valid MPU region allocation");
        assert!(mpu
            .allocate_region(
                0xd0000000 as *const u8,
                0x10000000,
                0x10000000,
                Permissions::ReadWriteOnly,
                &mut config,
            )
            .is_none());
    }
}

pub mod simple {
    use super::{pmpcfg_octet, TORUserPMP, TORUserPMPCFG};
    use crate::csr;
    use core::fmt;
    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};

    /// A "simple" RISC-V PMP implementation.
    ///
    /// The SimplePMP does not support locked regions, kernel memory protection,
    /// or any ePMP features (using the mseccfg CSR). It is generic over the
    /// number of hardware PMP regions available. `AVAILABLE_ENTRIES` is
    /// expected to be set to the number of available entries.
    ///
    /// [`SimplePMP`] implements [`TORUserPMP`] to expose all of its regions as
    /// "top of range" (TOR) regions (each taking up two physical PMP entries)
    /// for use as a user-mode memory protection mechanism.
    ///
    /// Notably, [`SimplePMP`] implements `TORUserPMP<MPU_REGIONS>` over a
    /// generic `MPU_REGIONS` where `MPU_REGIONS <= (AVAILABLE_ENTRIES / 2)`. As
    /// PMP re-configuration can have a significant runtime overhead, users are
    /// free to specify a small `MPU_REGIONS` const-generic parameter to reduce
    /// the runtime overhead induced through PMP configuration, at the cost of
    /// having fewer PMP regions available to use for userspace memory
    /// protection.
    pub struct SimplePMP<const AVAILABLE_ENTRIES: usize>;

    impl<const AVAILABLE_ENTRIES: usize> SimplePMP<AVAILABLE_ENTRIES> {
        pub unsafe fn new() -> Result<Self, ()> {
            // The SimplePMP does not support locked regions, kernel memory
            // protection, or any ePMP features (using the mseccfg CSR). Ensure
            // that we don't find any locked regions. If we don't have locked
            // regions and can still successfully execute code, this means that
            // we're not in the ePMP machine-mode lockdown mode, and can treat
            // our hardware as a regular PMP.
            //
            // Furthermore, we test whether we can use each entry (i.e. whether
            // it actually exists in HW) by flipping the RWX bits. If we can't
            // flip them, then `AVAILABLE_ENTRIES` is incorrect. However, this
            // is not sufficient to check for locked regions, because of the
            // ePMP's rule-lock-bypass bit. If a rule is locked, it might be the
            // reason why we can execute code or read-write data in machine mode
            // right now. Thus, never try to touch a locked region, as we might
            // well revoke access to a kernel region!
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR:
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // Hardware PMP is verified to be in a compatible mode / state, and
            // has at least `AVAILABLE_ENTRIES` entries.
            Ok(SimplePMP)
        }
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for SimplePMP<AVAILABLE_ENTRIES>
    {
        // Ensure that the MPU_REGIONS (starting at entry 0, and occupying two
        // entries per region) don't overflow the available entries.
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= (AVAILABLE_ENTRIES / 2));

        fn available_regions(&self) -> usize {
            // Always assume `MPU_REGIONS` usable TOR regions to be available.
            // We don't support locked regions, or kernel protection.
            MPU_REGIONS
        }

        // This implementation is specific for 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to usize, as it manages to compile
        // on 64-bit systems as well. However, this implementation will not work
        // on RV64I systems, due to the changed pmpcfgX CSR layout.
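        //
        // For example (illustrative): with i == 0 and two enabled regions, the
        // single `pmpconfig_set(0, ...)` write below packs four octets as
        // [pmp3cfg, pmp2cfg, pmp1cfg, pmp0cfg] = [odd_cfg, OFF, even_cfg, OFF],
        // so the even region's TOR bounds live in pmpaddr0/pmpaddr1 and the odd
        // region's in pmpaddr2/pmpaddr3.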
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Could use `iter_array_chunks` once that's stable.
            let mut regions_iter = regions.iter();
            let mut i = 0;

            while let Some(even_region) = regions_iter.next() {
                let odd_region_opt = regions_iter.next();

                if let Some(odd_region) = odd_region_opt {
                    // We can configure two regions at once which, given that we
                    // start at index 0 (an even offset), translates to a single
                    // CSR write for the pmpcfgX register:
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            odd_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                            even_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    // Now, set the addresses of the respective regions, if they
                    // are enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    if odd_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 2;
                } else {
                    // TODO: check overhead of code
                    // Modify the first two pmpcfgX octets for this region:
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0,
                            u32::from_be_bytes([
                                0,
                                0,
                                even_region.0.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    // Set the addresses if the region is enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 1;
                }
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // No-op. The SimplePMP does not have any kernel-enforced regions.
            Ok(())
        }

        fn disable_user_pmp(&self) {
            // No-op. The SimplePMP does not have any kernel-enforced regions.
        }
    }

    impl<const AVAILABLE_ENTRIES: usize> fmt::Display for SimplePMP<AVAILABLE_ENTRIES> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, " PMP hardware configuration -- entries: \r\n")?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
        }
    }
}

pub mod kernel_protection {
    use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
    use crate::csr;
    use core::fmt;
    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};

    // ---------- Kernel memory-protection PMP memory region wrapper types -----
    //
    // These types exist primarily to avoid argument confusion in the
    // [`KernelProtectionPMP`] constructor, which accepts the addresses of these
    // memory regions as arguments. They further encode whether a region must
    // adhere to the `NAPOT` or `TOR` addressing mode constraints:

    /// The flash memory region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct FlashRegion(pub NAPOTRegionSpec);

    /// The RAM region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct RAMRegion(pub NAPOTRegionSpec);

    /// The MMIO region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct MMIORegion(pub NAPOTRegionSpec);

    /// The PMP region specification for the kernel `.text` section.
    ///
    /// This is to be made accessible to machine-mode as read-execute.
    /// Configured in the PMP as a `TOR` region.
    #[derive(Copy, Clone, Debug)]
    pub struct KernelTextRegion(pub TORRegionSpec);

    /// A RISC-V PMP implementation which supports machine-mode (kernel) memory
    /// protection, with a fixed number of "kernel regions" (such as `.text`,
    /// flash, RAM and MMIO).
    ///
    /// This implementation will configure the PMP in the following way:
    ///
    ///   ```text
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   |     0 | /                                     \ | OFF   |   |       |
    ///   |     1 | \ Userspace TOR region #0             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   |     2 | /                                     \ | OFF   |   |       |
    ///   |     3 | \ Userspace TOR region #1             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   | 4 ... | /                                     \ |       |   |       |
    ///   | n - 8 | \ Userspace TOR region #x             / |       |   |       |
    ///   |       |                                         |       |   |       |
    ///   | n - 7 | "Deny-all" user-mode rule (all memory)  | NAPOT |   | ----- |
    ///   |       |                                         |       |   |       |
    ///   | n - 6 | --------------------------------------- | OFF   | X | ----- |
    ///   | n - 5 | Kernel .text section                    | TOR   | X | R/X   |
    ///   |       |                                         |       |   |       |
    ///   | n - 4 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
    ///   |       |                                         |       |   |       |
    ///   | n - 3 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
    ///   |       |                                         |       |   |       |
1380    ///   | n - 2 | MMIO                                    | NAPOT | X | R/W   |
1381    ///   |       |                                         |       |   |       |
1382    ///   | n - 1 | "Deny-all" machine-mode    (all memory) | NAPOT | X | ----- |
1383    ///   |-------+-----------------------------------------+-------+---+-------|
1384    ///   ```
1385    ///
1386    /// This implementation does not use any `mseccfg` protection bits (ePMP
1387    /// functionality). To protect machine-mode (kernel) memory regions, regions
1388    /// must be marked as locked. However, locked regions apply to both user-
1389    /// and machine-mode. Thus, region `n - 7` serves as a "deny-all" user-mode
1390    /// rule, which prohibits all accesses not explicitly allowed through rules
1391    /// `< n - 7`. Kernel memory is made accessible underneath this "deny-all"
1392    /// region, which does not apply to machine-mode.
1393    ///
1394    /// This PMP implementation supports the [`TORUserPMP`] interface with
1395    /// `MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2)`, to leave sufficient
1396    /// space for the "deny-all" and kernel regions. This constraint is enforced
1397    /// through the [`KernelProtectionPMP::CONST_ASSERT_CHECK`] associated
1398    /// constant, which MUST be evaluated by the consumer of the [`TORUserPMP`]
1399    /// trait (usually the [`PMPUserMPU`](super::PMPUserMPU) implementation).
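    ///
    /// A minimal construction sketch (hypothetical, for illustration only:
    /// the entry count of 16 and the `*_spec` region bindings are assumed
    /// here, not prescribed by this module):
    ///
    /// ```rust,ignore
    /// // Safety: the PMP must be unconfigured, with no locked entries.
    /// let pmp: KernelProtectionPMP<16> = unsafe {
    ///     KernelProtectionPMP::new(
    ///         FlashRegion(flash_napot_spec),
    ///         RAMRegion(ram_napot_spec),
    ///         MMIORegion(mmio_napot_spec),
    ///         KernelTextRegion(kernel_text_tor_spec),
    ///     )
    /// }
    /// .expect("PMP initialization failed");
    /// ```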
    pub struct KernelProtectionPMP<const AVAILABLE_ENTRIES: usize>;

    impl<const AVAILABLE_ENTRIES: usize> KernelProtectionPMP<AVAILABLE_ENTRIES> {
        pub unsafe fn new(
            flash: FlashRegion,
            ram: RAMRegion,
            mmio: MMIORegion,
            kernel_text: KernelTextRegion,
        ) -> Result<Self, ()> {
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR:
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits:
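                // (`7` is the combined R/W/X bitmask, bits 0-2 of the octet):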
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off:
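                // (the `0x18` mask clears the A (address-mode) field, bits
                // 3-4 of the octet):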
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // -----------------------------------------------------------------
            // Hardware PMP is verified to be in a compatible mode & state, and
            // has at least `AVAILABLE_ENTRIES` entries.
            // -----------------------------------------------------------------

            // Now we need to set up the various kernel memory protection
            // regions, and the deny-all userspace region (n - 7), which is
            // never modified afterwards.

            // Helper to modify an arbitrary PMP entry. Because we don't know
            // AVAILABLE_ENTRIES in advance, there's no good way to
            // optimize this further.
            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
                csr::CSR.pmpaddr_set(i, pmpaddr);
                csr::CSR.pmpconfig_modify(
                    i / 4,
                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                        0x000000FF_usize,
                        (i % 4) * 8,
                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
                    ),
                );
            }

            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
            // particular order, with the exception of `.text` and flash:
            // `.text` must precede flash, as otherwise we'd be revoking execute
            // permissions temporarily. Given that we can currently execute
            // code, this should not have any impact on our accessible memory,
            // assuming that the provided regions are not otherwise aliased.

            // MMIO at n - 2:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 2,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                mmio.0.napot_addr(),
            );

            // RAM at n - 3:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 3,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                ram.0.napot_addr(),
            );

            // `.text` at n - 6 and n - 5 (TOR region):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 6,
                (pmpcfg_octet::a::OFF
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
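                // pmpaddr CSRs hold physical address bits [33:2], hence the
                // right-shift by 2: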
                (kernel_text.0.start() as usize) >> 2,
            );
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 5,
                (pmpcfg_octet::a::TOR
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::SET
                    + pmpcfg_octet::l::SET)
                    .into(),
                (kernel_text.0.end() as usize) >> 2,
            );

            // flash at n - 4:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 4,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                flash.0.napot_addr(),
            );

            // Now that the kernel has explicit region definitions for any
            // memory that it needs to have access to, we can deny other memory
            // accesses in our very last rule (n - 1):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 1,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                // the entire address space:
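                // (0x7FFFFFFF has 31 trailing ones: a NAPOT region with base
                // 0 and size 2^34 bytes, covering the full RV32 physical
                // address space):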
                0x7FFFFFFF,
            );

            // Finally, we configure the non-locked user-mode deny-all
            // rule. This entry must never be removed; otherwise, user mode
            // would be able to access all locked regions (which are supposed
            // to be exclusively accessible to kernel-mode):
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 7,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::CLEAR)
                    .into(),
                // the entire address space:
                0x7FFFFFFF,
            );

            // Setup complete
            Ok(KernelProtectionPMP)
        }
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for KernelProtectionPMP<AVAILABLE_ENTRIES>
    {
        /// Ensure that the MPU_REGIONS (starting at entry 0, and occupying
        /// two entries per region) don't overflow the available entries,
        /// excluding the 7 entries used for implementing the kernel memory
        /// protection.
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 7) / 2));

        fn available_regions(&self) -> usize {
            // Always report `MPU_REGIONS` usable TOR regions. We don't
            // support locking additional regions at runtime.
            MPU_REGIONS
        }

        // This implementation is specific to 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to `usize`, as this also
        // compiles on 64-bit systems. However, this implementation will not
        // work on RV64I systems, due to the changed pmpcfgX CSR layout.
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Could use `iter_array_chunks` once that's stable.
            let mut regions_iter = regions.iter();
            let mut i = 0;

            while let Some(even_region) = regions_iter.next() {
                let odd_region_opt = regions_iter.next();

                if let Some(odd_region) = odd_region_opt {
                    // We can configure two regions at once which, given that we
                    // start at index 0 (an even offset), translates to a single
                    // CSR write for the pmpcfgX register:
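                    // (with `from_be_bytes`, the odd region's TOR octet lands
                    // in CSR bits 31:24 and the even region's in bits 15:8;
                    // the OFF octets for their start-address entries land in
                    // bits 23:16 and 7:0, respectively):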
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            odd_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                            even_region.0.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    // Now set the addresses of the respective regions, if
                    // they are enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    if odd_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 2, (odd_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 3, (odd_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 2;
                } else {
                    // Modify the first two pmpcfgX octets for this region:
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0,
                            u32::from_be_bytes([
                                0,
                                0,
                                even_region.0.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    // Set the addresses if the region is enabled:
                    if even_region.0 != TORUserPMPCFG::OFF {
                        csr::CSR
                            .pmpaddr_set(i * 2 + 0, (even_region.1 as usize).overflowing_shr(2).0);
                        csr::CSR
                            .pmpaddr_set(i * 2 + 1, (even_region.2 as usize).overflowing_shr(2).0);
                    }

                    i += 1;
                }
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // No-op. User-mode regions are never enforced in machine-mode, and
            // thus can be configured directly and may stay enabled in
            // machine-mode.
            Ok(())
        }

        fn disable_user_pmp(&self) {
            // No-op. User-mode regions are never enforced in machine-mode, and
            // thus can be configured directly and may stay enabled in
            // machine-mode.
        }
    }

    impl<const AVAILABLE_ENTRIES: usize> fmt::Display for KernelProtectionPMP<AVAILABLE_ENTRIES> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, " PMP hardware configuration -- entries: \r\n")?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }
        }
    }
}

pub mod kernel_protection_mml_epmp {
    use super::{pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG};
    use crate::csr;
    use core::cell::Cell;
    use core::fmt;
    use kernel::platform::mpu;
    use kernel::utilities::registers::interfaces::{Readable, Writeable};
    use kernel::utilities::registers::{FieldValue, LocalRegisterCopy};

    // ---------- Kernel memory-protection PMP memory region wrapper types -----
    //
    // These types exist primarily to avoid argument confusion in the
    // [`KernelProtectionMMLEPMP`] constructor, which accepts the addresses of
    // these memory regions as arguments. They further encode whether a region
    // must adhere to the `NAPOT` or `TOR` addressing mode constraints:

    /// The flash memory region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct FlashRegion(pub NAPOTRegionSpec);

    /// The RAM region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct RAMRegion(pub NAPOTRegionSpec);

    /// The MMIO region address range.
    ///
    /// Configured in the PMP as a `NAPOT` region.
    #[derive(Copy, Clone, Debug)]
    pub struct MMIORegion(pub NAPOTRegionSpec);

    /// The PMP region specification for the kernel `.text` section.
    ///
    /// This is to be made accessible to machine-mode as read-execute.
    /// Configured in the PMP as a `TOR` region.
    #[derive(Copy, Clone, Debug)]
    pub struct KernelTextRegion(pub TORRegionSpec);

    /// A RISC-V ePMP implementation.
    ///
    /// Supports machine-mode (kernel) memory protection by using the
    /// machine-mode lockdown mode (MML), with a fixed number of
    /// "kernel regions" (such as `.text`, flash, RAM and MMIO).
    ///
    /// This implementation will configure the ePMP in the following way:
    ///
    /// - `mseccfg` CSR:
    ///   ```text
    ///   |-------------+-----------------------------------------------+-------|
    ///   | MSECCFG BIT | LABEL                                         | STATE |
    ///   |-------------+-----------------------------------------------+-------|
    ///   |           0 | Machine-Mode Lockdown (MML)                   |     1 |
    ///   |           1 | Machine-Mode Whitelist Policy (MMWP)          |     1 |
    ///   |           2 | Rule-Lock Bypass (RLB)                        |     0 |
    ///   |-------------+-----------------------------------------------+-------|
    ///   ```
    ///
    /// - `pmpaddrX` / `pmpcfgX` CSRs:
    ///   ```text
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   | ENTRY | REGION / ADDR                           | MODE  | L | PERMS |
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   |     0 | --------------------------------------- | OFF   | X | ----- |
    ///   |     1 | Kernel .text section                    | TOR   | X | R/X   |
    ///   |       |                                         |       |   |       |
    ///   |     2 | /                                     \ | OFF   |   |       |
    ///   |     3 | \ Userspace TOR region #0             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   |     4 | /                                     \ | OFF   |   |       |
    ///   |     5 | \ Userspace TOR region #1             / | TOR   |   | ????? |
    ///   |       |                                         |       |   |       |
    ///   | 6 ... | /                                     \ |       |   |       |
    ///   | n - 4 | \ Userspace TOR region #x             / |       |   |       |
    ///   |       |                                         |       |   |       |
    ///   | n - 3 | FLASH (spanning kernel & apps)          | NAPOT | X | R     |
    ///   |       |                                         |       |   |       |
    ///   | n - 2 | RAM (spanning kernel & apps)            | NAPOT | X | R/W   |
    ///   |       |                                         |       |   |       |
    ///   | n - 1 | MMIO                                    | NAPOT | X | R/W   |
    ///   |-------+-----------------------------------------+-------+---+-------|
    ///   ```
    ///
    /// Crucially, this implementation relies on an unconfigured hardware PMP
    /// implementing the ePMP (`mseccfg` CSR) extension, providing the Machine
    /// Lockdown Mode (MML) security bit. This bit is required to ensure that
    /// any machine-mode (kernel) protection regions (lock bit set) are only
    /// accessible to kernel mode.
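    ///
    /// A minimal construction sketch (hypothetical, for illustration only:
    /// the const parameters `<16, 4>` and the `*_spec` region bindings are
    /// assumed here, not prescribed by this module):
    ///
    /// ```rust,ignore
    /// // Safety: the PMP must be unconfigured, with no locked entries, and
    /// // the hardware must implement the ePMP (`mseccfg`) extension.
    /// let epmp: KernelProtectionMMLEPMP<16, 4> = unsafe {
    ///     KernelProtectionMMLEPMP::new(
    ///         FlashRegion(flash_napot_spec),
    ///         RAMRegion(ram_napot_spec),
    ///         MMIORegion(mmio_napot_spec),
    ///         KernelTextRegion(kernel_text_tor_spec),
    ///     )
    /// }
    /// .expect("ePMP initialization failed");
    /// ```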
    pub struct KernelProtectionMMLEPMP<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> {
        user_pmp_enabled: Cell<bool>,
        shadow_user_pmpcfgs: [Cell<TORUserPMPCFG>; MPU_REGIONS],
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize>
        KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        // Start user-mode TOR regions after the first kernel .text region:
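        // (a region offset of 1 corresponds to PMP entries 2 and 3):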
        const TOR_REGIONS_OFFSET: usize = 1;

        pub unsafe fn new(
            flash: FlashRegion,
            ram: RAMRegion,
            mmio: MMIORegion,
            kernel_text: KernelTextRegion,
        ) -> Result<Self, ()> {
            for i in 0..AVAILABLE_ENTRIES {
                // Read the entry's CSR:
                let pmpcfg_csr = csr::CSR.pmpconfig_get(i / 4);

                // Extract the entry's pmpcfg octet:
                let pmpcfg: LocalRegisterCopy<u8, pmpcfg_octet::Register> = LocalRegisterCopy::new(
                    pmpcfg_csr.overflowing_shr(((i % 4) * 8) as u32).0 as u8,
                );

                // As outlined above, we never touch a locked region. Thus, bail
                // out if it's locked:
                if pmpcfg.is_set(pmpcfg_octet::l) {
                    return Err(());
                }

                // Now that it's not locked, we can be sure that regardless of
                // any ePMP bits, this region is either ignored or entirely
                // denied for machine-mode access. Hence, we can change it in
                // arbitrary ways without breaking our own memory access. Try to
                // flip the R/W/X bits:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr ^ (7 << ((i % 4) * 8)));

                // Check if the CSR changed:
                if pmpcfg_csr == csr::CSR.pmpconfig_get(i / 4) {
                    // Didn't change! This means that this region is not backed
                    // by HW. Return an error as `AVAILABLE_ENTRIES` is
                    // incorrect:
                    return Err(());
                }

                // Finally, turn the region off:
                csr::CSR.pmpconfig_set(i / 4, pmpcfg_csr & !(0x18 << ((i % 4) * 8)));
            }

            // -----------------------------------------------------------------
            // Hardware PMP is verified to be in a compatible mode & state, and
            // has at least `AVAILABLE_ENTRIES` entries. We have not yet checked
            // whether the PMP is actually an _e_PMP. However, we don't want to
            // produce a gadget to set RLB, and so the only safe way to test
            // this is to set up the PMP regions and then try to enable the
            // mseccfg bits.
            // -----------------------------------------------------------------

            // Helper to modify an arbitrary PMP entry. Because we don't know
            // AVAILABLE_ENTRIES in advance, there's no good way to
            // optimize this further.
            fn write_pmpaddr_pmpcfg(i: usize, pmpcfg: u8, pmpaddr: usize) {
                // Important to set the address first. Locking the pmpcfg
                // register will also lock the address register!
                csr::CSR.pmpaddr_set(i, pmpaddr);
                csr::CSR.pmpconfig_modify(
                    i / 4,
                    FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                        0x000000FF_usize,
                        (i % 4) * 8,
                        u32::from_be_bytes([0, 0, 0, pmpcfg]) as usize,
                    ),
                );
            }

            // Set the kernel `.text`, flash, RAM and MMIO regions, in no
            // particular order, with the exception of `.text` and flash:
            // `.text` must precede flash, as otherwise we'd be revoking execute
            // permissions temporarily. Given that we can currently execute
            // code, this should not have any impact on our accessible memory,
            // assuming that the provided regions are not otherwise aliased.

            // `.text` at entries 0 and 1 (TOR region):
            write_pmpaddr_pmpcfg(
                0,
                (pmpcfg_octet::a::OFF
                    + pmpcfg_octet::r::CLEAR
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                (kernel_text.0.start() as usize) >> 2,
            );
            write_pmpaddr_pmpcfg(
                1,
                (pmpcfg_octet::a::TOR
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::SET
                    + pmpcfg_octet::l::SET)
                    .into(),
                (kernel_text.0.end() as usize) >> 2,
            );

            // MMIO at n - 1:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 1,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                mmio.0.napot_addr(),
            );

            // RAM at n - 2:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 2,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::SET
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                ram.0.napot_addr(),
            );

            // flash at n - 3:
            write_pmpaddr_pmpcfg(
                AVAILABLE_ENTRIES - 3,
                (pmpcfg_octet::a::NAPOT
                    + pmpcfg_octet::r::SET
                    + pmpcfg_octet::w::CLEAR
                    + pmpcfg_octet::x::CLEAR
                    + pmpcfg_octet::l::SET)
                    .into(),
                flash.0.napot_addr(),
            );

            // Finally, attempt to enable the MSECCFG security bits, and verify
            // that they have been set correctly. If they have not been set to
            // the written value, this means that this hardware either does not
            // support ePMP, or was otherwise in some invalid state. We don't
            // need to read back the above regions, as we previously verified
            // that none of their entries were locked -- so writing to them
            // must work even without RLB set.
            //
            // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
            csr::CSR.mseccfg.set(0x00000003);

            // Read back the MSECCFG CSR to ensure that the machine's security
            // configuration was set properly. If this fails, we have set up the
            // PMP in a way that would give userspace access to kernel
            // space. The caller of this method must appropriately handle this
            // error condition by ensuring that the platform will never execute
            // userspace code!
            if csr::CSR.mseccfg.get() != 0x00000003 {
                return Err(());
            }

            // Setup complete
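            // A `const` item is required for the array-repeat initializer
            // below: `[expr; N]` needs `expr: Copy`, which `Cell` is not, but
            // constants are instantiated once per element: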
            const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
            Ok(KernelProtectionMMLEPMP {
                user_pmp_enabled: Cell::new(false),
                shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; MPU_REGIONS],
            })
        }
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> TORUserPMP<MPU_REGIONS>
        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        // Ensure that the MPU_REGIONS (each occupying two entries, starting
        // after the kernel .text region) don't overflow the available
        // entries, excluding the 5 entries used for implementing the kernel
        // memory protection:
        const CONST_ASSERT_CHECK: () = assert!(MPU_REGIONS <= ((AVAILABLE_ENTRIES - 5) / 2));

        fn available_regions(&self) -> usize {
            // Always report `MPU_REGIONS` usable TOR regions. We don't
            // support locking additional regions at runtime.
            MPU_REGIONS
        }

        // This implementation is specific to 32-bit systems. We use
        // `u32::from_be_bytes` and then cast to `usize`, as this also
        // compiles on 64-bit systems. However, this implementation will not
        // work on RV64I systems, due to the changed pmpcfgX CSR layout.
        fn configure_pmp(
            &self,
            regions: &[(TORUserPMPCFG, *const u8, *const u8); MPU_REGIONS],
        ) -> Result<(), ()> {
            // Configure all of the regions' addresses and store their pmpcfg
            // octets in our shadow storage. If the user PMP is already
            // enabled, we further apply this configuration (set the pmpcfgX
            // CSRs) by running `enable_user_pmp`:
            for (i, (region, shadow_user_pmpcfg)) in regions
                .iter()
                .zip(self.shadow_user_pmpcfgs.iter())
                .enumerate()
            {
                // The ePMP in MML mode does not support read-write-execute
                // regions. If such a region is to be configured, abort. As
                // this loop only modifies the shadow state, we can simply
                // abort and return an error. We don't make any promises about
                // the ePMP state if the configuration fails, but the new
                // configuration is only ever applied through
                // `enable_user_pmp`:
                if region.0.get()
                    == <TORUserPMPCFG as From<mpu::Permissions>>::from(
                        mpu::Permissions::ReadWriteExecute,
                    )
                    .get()
                {
                    return Err(());
                }

                // Set the CSR addresses for this region (if it's not OFF, in
                // which case the hardware-configured addresses are irrelevant):
                if region.0 != TORUserPMPCFG::OFF {
                    csr::CSR.pmpaddr_set(
                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 0,
                        (region.1 as usize).overflowing_shr(2).0,
                    );
                    csr::CSR.pmpaddr_set(
                        (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
                        (region.2 as usize).overflowing_shr(2).0,
                    );
                }

                // Store the region's pmpcfg octet:
                shadow_user_pmpcfg.set(region.0);
            }

            // If the PMP is currently active, apply the changes to the CSRs:
            if self.user_pmp_enabled.get() {
                self.enable_user_pmp()?;
            }

            Ok(())
        }

        fn enable_user_pmp(&self) -> Result<(), ()> {
            // We store the "enabled" PMPCFG octets of user regions in the
            // `shadow_user_pmpcfgs` field, such that we can re-enable the PMP
            // without a call to `configure_pmp` (where the `TORUserPMPCFG`s
            // are provided by the caller).

            // Could use `iter_array_chunks` once that's stable.
            let mut shadow_user_pmpcfgs_iter = self.shadow_user_pmpcfgs.iter();
            let mut i = Self::TOR_REGIONS_OFFSET;

            while let Some(first_region_pmpcfg) = shadow_user_pmpcfgs_iter.next() {
                // If we're at a "region" offset divisible by two (where
                // "region" = 2 PMP "entries"), then we can configure an entire
                // `pmpcfgX` CSR in one operation. As CSR writes are expensive,
                // this is an optimization worth making:
                let second_region_opt = if i % 2 == 0 {
                    shadow_user_pmpcfgs_iter.next()
                } else {
                    None
                };

                if let Some(second_region_pmpcfg) = second_region_opt {
                    // We're at an even index and have two regions to configure,
                    // so do that with a single CSR write:
                    csr::CSR.pmpconfig_set(
                        i / 2,
                        u32::from_be_bytes([
                            second_region_pmpcfg.get().get(),
                            TORUserPMPCFG::OFF.get(),
                            first_region_pmpcfg.get().get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );

                    i += 2;
                } else if i % 2 == 0 {
                    // This is a single region at an even index. Thus, modify the
                    // first two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0, // lower two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                first_region_pmpcfg.get().get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    i += 1;
                } else {
                    // This is a single region at an odd index. Thus, modify the
                    // latter two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        i / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            16, // higher two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                first_region_pmpcfg.get().get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );

                    i += 1;
                }
            }

            self.user_pmp_enabled.set(true);

            Ok(())
        }

        fn disable_user_pmp(&self) {
            // Simply set all of the user-region pmpcfg octets to OFF:

            let mut user_region_pmpcfg_octet_pairs =
                (Self::TOR_REGIONS_OFFSET)..(Self::TOR_REGIONS_OFFSET + MPU_REGIONS);
            while let Some(first_region_idx) = user_region_pmpcfg_octet_pairs.next() {
                let second_region_opt = if first_region_idx % 2 == 0 {
                    user_region_pmpcfg_octet_pairs.next()
                } else {
                    None
                };

                if let Some(_second_region_idx) = second_region_opt {
                    // We're at an even index and have two regions to configure,
                    // so do that with a single CSR write:
                    csr::CSR.pmpconfig_set(
                        first_region_idx / 2,
                        u32::from_be_bytes([
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                            TORUserPMPCFG::OFF.get(),
                        ]) as usize,
                    );
                } else if first_region_idx % 2 == 0 {
                    // This is a single region at an even index. Thus, modify the
                    // first two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            0, // lower two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                } else {
                    // This is a single region at an odd index. Thus, modify the
                    // latter two pmpcfgX octets for this region.
                    csr::CSR.pmpconfig_modify(
                        first_region_idx / 2,
                        FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
                            0x0000FFFF,
                            16, // higher two octets
                            u32::from_be_bytes([
                                0,
                                0,
                                TORUserPMPCFG::OFF.get(),
                                TORUserPMPCFG::OFF.get(),
                            ]) as usize,
                        ),
                    );
                }
            }

            self.user_pmp_enabled.set(false);
        }
    }

    impl<const AVAILABLE_ENTRIES: usize, const MPU_REGIONS: usize> fmt::Display
        for KernelProtectionMMLEPMP<AVAILABLE_ENTRIES, MPU_REGIONS>
    {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                " ePMP configuration:\r\n  mseccfg: {:#08X}, user-mode PMP active: {:?}, entries:\r\n",
                csr::CSR.mseccfg.get(),
                self.user_pmp_enabled.get()
            )?;
            unsafe { super::format_pmp_entries::<AVAILABLE_ENTRIES>(f) }?;

            write!(f, "  Shadow PMP entries for user-mode:\r\n")?;
            for (i, shadowed_pmpcfg) in self.shadow_user_pmpcfgs.iter().enumerate() {
                let (start_pmpaddr_label, startaddr_pmpaddr, endaddr, mode) =
                    if shadowed_pmpcfg.get() == TORUserPMPCFG::OFF {
                        (
                            "pmpaddr",
                            csr::CSR.pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2),
                            0,
                            "OFF",
                        )
                    } else {
                        (
                            "  start",
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2)
                                .overflowing_shl(2)
                                .0,
                            csr::CSR
                                .pmpaddr_get((i + Self::TOR_REGIONS_OFFSET) * 2 + 1)
                                .overflowing_shl(2)
                                .0
                                | 0b11,
                            "TOR",
                        )
                    };

                write!(
                    f,
                    "  [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}  ) ({}{}{}{})\r\n",
                    (i + Self::TOR_REGIONS_OFFSET) * 2 + 1,
                    start_pmpaddr_label,
                    startaddr_pmpaddr,
                    endaddr,
                    shadowed_pmpcfg.get().get(),
                    mode,
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::l) {
                        "l"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::r) {
                        "r"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::w) {
                        "w"
                    } else {
                        "-"
                    },
                    if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::x) {
                        "x"
                    } else {
                        "-"
                    },
                )?;
            }

            Ok(())
        }
    }
}