1use core::cell::Cell;
10use core::cmp;
11use core::fmt;
12use core::num::NonZeroUsize;
13
14use kernel::platform::mpu;
15use kernel::utilities::cells::OptionalCell;
16use kernel::utilities::registers::interfaces::ReadWriteable;
17use kernel::utilities::registers::interfaces::{Readable, Writeable};
18use kernel::utilities::registers::{
19 register_bitfields, register_structs, FieldValue, ReadOnly, ReadWrite,
20};
21use kernel::utilities::StaticRef;
22
23const CORTEXM_MIN_REGION_SIZE: usize = 32;
26
register_structs! {
    // Memory-mapped registers of the ARMv8-M Memory Protection Unit.
    //
    // Offsets follow the ARMv8-M system control space layout: TYPE, CTRL,
    // RNR, then the RBAR/RLAR pair for the region selected by RNR, three
    // alias pairs, and the two MAIR attribute-indirection registers.
    pub MpuRegisters {
        // Read-only: reports whether an MPU is present and how many
        // regions it implements (see MPU_TYPE::DREGION).
        (0x0000 => mpu_type: ReadOnly<u32, MPU_TYPE::Register>),
        // Global enable and fault-handler/privileged-default behavior.
        (0x0004 => ctrl: ReadWrite<u32, MPU_CTRL::Register>),
        // Region number register: selects which region rbar/rlar address.
        (0x0008 => rnr: ReadWrite<u32, MPU_RNR::Register>),
        // Base address + access attributes of the selected region.
        (0x000C => rbar: ReadWrite<u32, MPU_RBAR::Register>),
        // Limit address, attribute index, and enable of the selected region.
        (0x0010 => rlar: ReadWrite<u32, MPU_RLAR::Register>),
        // Aliases of RBAR/RLAR for the regions following the one selected
        // by RNR, allowing several regions to be programmed without
        // rewriting RNR each time. Unused by this driver.
        (0x0014 => rbar_a1: ReadWrite<u32, MPU_RBAR_A1::Register>),
        (0x0018 => rlar_a1: ReadWrite<u32, MPU_RLAR_A1::Register>),
        (0x001C => rbar_a2: ReadWrite<u32, MPU_RBAR_A2::Register>),
        (0x0020 => rlar_a2: ReadWrite<u32, MPU_RLAR_A2::Register>),
        (0x0024 => rbar_a3: ReadWrite<u32, MPU_RBAR_A3::Register>),
        (0x0028 => rlar_a3: ReadWrite<u32, MPU_RLAR_A3::Register>),
        (0x002c => _reserved0),
        // Memory attribute indirection for attribute indices 0-3.
        (0x0030 => mair0: ReadWrite<u32, MPU_MAIR0::Register>),
        // Memory attribute indirection for attribute indices 4-7.
        (0x0034 => mair1: ReadWrite<u32, MPU_MAIR1::Register>),
        (0x0038 => @END),
    }
}
60
register_bitfields![u32,
    // Describes the implemented MPU.
    MPU_TYPE [
        // Number of regions this MPU implements (0 = no MPU).
        DREGION OFFSET(8) NUMBITS(8) [],
        // Whether separate instruction/data regions are supported.
        SEPARATE OFFSET(0) NUMBITS(1) []
    ],
    // Global MPU control.
    MPU_CTRL [
        // When set, privileged accesses to unmapped addresses use the
        // default memory map instead of faulting.
        PRIVDEFENA OFFSET(2) NUMBITS(1) [],
        // When set, the MPU stays enabled during HardFault/NMI handlers.
        HFNMIENA OFFSET(1) NUMBITS(1) [],
        // Master enable for the MPU.
        ENABLE OFFSET(0) NUMBITS(1) []
    ],
    // Region number register: selects the region RBAR/RLAR refer to.
    MPU_RNR [
        REGION OFFSET(0) NUMBITS(8) []
    ],
    // Region base address register for the selected region.
    MPU_RBAR [
        // Bits [31:5] of the region base address (32-byte granularity).
        BASE OFFSET(5) NUMBITS(27) [],
        // Shareability attribute for Normal memory.
        SH OFFSET(3) NUMBITS(2) [],
        // Access permissions for privileged/unprivileged code.
        AP OFFSET(1) NUMBITS(2) [
            ReadWritePrivilegedOnly = 0b00,
            ReadWrite = 0b01,
            ReadOnlyPrivilegedOnly = 0b10,
            ReadOnly = 0b11
        ],
        // Execute-never. NOTE the inverted sense of the names: XN bit
        // clear (Enable = 0) permits execution, XN bit set (Disable = 1)
        // forbids it.
        XN OFFSET(0) NUMBITS(1) [
            Enable = 0,
            Disable = 1
        ]
    ],
    // Region limit address register for the selected region.
    MPU_RLAR [
        // Bits [31:5] of the region limit address.
        LIMIT OFFSET(5) NUMBITS(27) [],
        // Privileged execute-never; same inverted naming as XN above
        // (Disable = 1 sets the PXN bit).
        PXN OFFSET(4) NUMBITS(1) [
            Enable = 0,
            Disable = 1,
        ],
        // Index into MAIR0/MAIR1 selecting the memory attributes.
        ATTRINDX OFFSET(1) NUMBITS(3) [],
        // Enable bit for this individual region.
        ENABLE OFFSET(0) NUMBITS(1) []
    ],
    // Alias pairs 1-3: same layout as MPU_RBAR/MPU_RLAR, defined
    // separately so each alias register gets its own bitfield type.
    MPU_RBAR_A1 [
        BASE OFFSET(5) NUMBITS(27) [],
        SH OFFSET(3) NUMBITS(2) [],
        AP OFFSET(1) NUMBITS(2) [],
        XN OFFSET(0) NUMBITS(1) []
    ],
    MPU_RLAR_A1 [
        LIMIT OFFSET(5) NUMBITS(27) [],
        ATTRINDX OFFSET(1) NUMBITS(3) [],
        EN OFFSET(0) NUMBITS(1) []
    ],
    MPU_RBAR_A2 [
        BASE OFFSET(5) NUMBITS(27) [],
        SH OFFSET(3) NUMBITS(2) [],
        AP OFFSET(1) NUMBITS(2) [],
        XN OFFSET(0) NUMBITS(1) []
    ],
    MPU_RLAR_A2 [
        LIMIT OFFSET(5) NUMBITS(27) [],
        ATTRINDX OFFSET(1) NUMBITS(3) [],
        EN OFFSET(0) NUMBITS(1) []
    ],
    MPU_RBAR_A3 [
        BASE OFFSET(5) NUMBITS(27) [],
        SH OFFSET(3) NUMBITS(2) [],
        AP OFFSET(1) NUMBITS(2) [],
        XN OFFSET(0) NUMBITS(1) []
    ],
    MPU_RLAR_A3 [
        LIMIT OFFSET(5) NUMBITS(27) [],
        ATTRINDX OFFSET(1) NUMBITS(3) [],
        EN OFFSET(0) NUMBITS(1) []
    ],
    // Memory attribute encodings for indices 0-3.
    MPU_MAIR0 [
        ATTR3 OFFSET(24) NUMBITS(8) [],
        ATTR2 OFFSET(16) NUMBITS(8) [],
        ATTR1 OFFSET(8) NUMBITS(8) [],
        ATTR0 OFFSET(0) NUMBITS(8) []
    ],
    // Memory attribute encodings for indices 4-7.
    MPU_MAIR1 [
        ATTR7 OFFSET(24) NUMBITS(8) [],
        ATTR6 OFFSET(16) NUMBITS(8) [],
        ATTR5 OFFSET(8) NUMBITS(8) [],
        ATTR4 OFFSET(0) NUMBITS(8) []
    ],
];
186
/// Round `initial_ptr` up to the next 32-byte boundary (the ARMv8-M MPU
/// region granule). A pointer that is already aligned is returned as-is.
///
/// Returns `Err(())` when `align_offset` reports that alignment cannot be
/// computed (it is permitted to return `usize::MAX`).
fn align32(initial_ptr: *const u8) -> Result<*const u8, ()> {
    match initial_ptr.align_offset(32) {
        usize::MAX => Err(()),
        offset => Ok(initial_ptr.wrapping_add(offset)),
    }
}
197
/// Driver state for the ARMv8-M MPU, generic over the number of
/// hardware regions managed per configuration.
pub struct MPU<const NUM_REGIONS: usize> {
    // MMIO register block of the MPU.
    registers: StaticRef<MpuRegisters>,
    // Monotonic counter used by `new_config` to stamp each
    // `CortexMConfig` with a unique, non-zero id.
    config_count: Cell<NonZeroUsize>,
    // Id of the config most recently written to the hardware, if any;
    // lets `configure_mpu` skip redundant reprogramming.
    hardware_is_configured_for: OptionalCell<NonZeroUsize>,
}
213
impl<const NUM_REGIONS: usize> MPU<NUM_REGIONS> {
    /// Construct a driver for the MPU register block at `registers`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `registers` refers to the real MPU
    /// MMIO registers and that no other code concurrently manages them.
    pub const unsafe fn new(registers: StaticRef<MpuRegisters>) -> Self {
        Self {
            registers,
            config_count: Cell::new(NonZeroUsize::MIN),
            hardware_is_configured_for: OptionalCell::empty(),
        }
    }

    /// Disable the MPU entirely.
    ///
    /// This is a full-register write, so besides clearing ENABLE it also
    /// zeroes HFNMIENA and PRIVDEFENA.
    ///
    /// # Safety
    ///
    /// With the MPU off, no memory protection is enforced; the caller is
    /// responsible for ensuring this is safe at this point in execution.
    pub unsafe fn clear_mpu(&self) {
        self.registers.ctrl.write(MPU_CTRL::ENABLE::CLEAR);
    }
}
229
/// A complete MPU configuration (e.g. for one process): one entry per
/// hardware region, with the array index doubling as the region number.
pub struct CortexMConfig<const NUM_REGIONS: usize> {
    // Unique id assigned at creation; compared against
    // `MPU::hardware_is_configured_for` to detect whether the hardware
    // already holds this configuration.
    id: NonZeroUsize,
    // The regions of this configuration; `regions[i]` programs hardware
    // region number `i`.
    regions: [CortexMRegion; NUM_REGIONS],
    // Set whenever `regions` is modified; cleared once the configuration
    // has been written to the hardware.
    is_dirty: Cell<bool>,
}
245
/// Highest region number reserved for the process ("app") memory region.
/// Region numbers up to and including this value are managed by
/// `allocate_app_memory_region`/`update_app_memory_region` and are never
/// handed out for general-purpose allocations.
const APP_MEMORY_REGION_MAX_NUM: usize = 0;
251
252impl<const NUM_REGIONS: usize> fmt::Display for CortexMConfig<NUM_REGIONS> {
253 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
254 write!(f, "\r\n Cortex-M MPU")?;
255 for (i, region) in self.regions.iter().enumerate() {
256 if let Some(location) = region.location {
257 let access_bits = region.rbar_value.read(MPU_RBAR::AP);
258 let access_str = match access_bits {
259 0b00 => "ReadWritePrivilegedOnly",
260 0b01 => "ReadWrite",
261 0b10 => "ReadOnlyPrivilegedOnly",
262 0b11 => "ReadOnly",
263 _ => "ERR",
264 };
265 let start = location.0 as usize;
266 let end = location.1 as usize;
267 write!(
268 f,
269 "\
270 \r\n Region {}: [{:#010X}:{:#010X}], length: {} bytes; {} ({:#x})",
271 i,
272 start,
273 end,
274 end - start,
275 access_str,
276 access_bits,
277 )?;
278 } else {
279 write!(f, "\r\n Region {}: Unused", i)?;
280 }
281 }
282 write!(f, "\r\n")
283 }
284}
285
286impl<const NUM_REGIONS: usize> CortexMConfig<NUM_REGIONS> {
287 fn unused_region_number(&self) -> Option<usize> {
288 for (number, region) in self.regions.iter().enumerate() {
289 if number == APP_MEMORY_REGION_MAX_NUM {
290 continue;
291 }
292 if region.location.is_none() {
293 return Some(number);
294 }
295 }
296 None
297 }
298}
299
/// One MPU region: the precomputed RBAR/RLAR register values plus
/// bookkeeping needed for allocation and display.
#[derive(Copy, Clone)]
pub struct CortexMRegion {
    // (start, end) of the memory backing this region, end exclusive, or
    // `None` if this slot is unused. Note this records the full backing
    // allocation, which may be larger than the accessible logical span
    // encoded in `rbar_value`/`rlar_value`.
    location: Option<(*const u8, *const u8)>,
    // Value to write to MPU_RBAR for this region.
    rbar_value: FieldValue<u32, MPU_RBAR::Register>,
    // Value to write to MPU_RLAR for this region.
    rlar_value: FieldValue<u32, MPU_RLAR::Register>,
    // Hardware region number this entry programs (via MPU_RNR).
    region_num: usize,
}
308
309impl PartialEq<mpu::Region> for CortexMRegion {
310 fn eq(&self, other: &mpu::Region) -> bool {
311 self.location.is_some_and(|(start, end)| {
312 core::ptr::eq(start, other.start_address())
313 && (end as usize - start as usize) == other.size()
314 })
315 }
316}
317
318impl CortexMRegion {
319 fn new(
320 logical_start: *const u8,
321 logical_size: usize,
322 region_start: *const u8,
323 region_size: usize,
324 region_num: usize,
325 permissions: mpu::Permissions,
326 ) -> Option<CortexMRegion> {
327 if logical_size < CORTEXM_MIN_REGION_SIZE || region_size < logical_size {
330 return None;
331 }
332
333 let (access, execute) = match permissions {
335 mpu::Permissions::ReadWriteExecute => (MPU_RBAR::AP::ReadWrite, MPU_RBAR::XN::Enable),
336 mpu::Permissions::ReadWriteOnly => (MPU_RBAR::AP::ReadWrite, MPU_RBAR::XN::Disable),
337 mpu::Permissions::ReadExecuteOnly => (MPU_RBAR::AP::ReadOnly, MPU_RBAR::XN::Enable),
338 mpu::Permissions::ReadOnly => (MPU_RBAR::AP::ReadOnly, MPU_RBAR::XN::Disable),
339 mpu::Permissions::ExecuteOnly => {
340 (MPU_RBAR::AP::ReadOnlyPrivilegedOnly, MPU_RBAR::XN::Enable)
341 }
342 };
343
344 let rbar_value = MPU_RBAR::BASE.val((logical_start as u32) >> 5)
346 + MPU_RBAR::SH.val(0)
347 + access
348 + execute;
349
350 let logical_end = logical_start as usize + logical_size;
351
352 if logical_end % 32 != 0 {
354 return None;
355 }
356
357 let rlar_value = MPU_RLAR::ENABLE::SET
359 + MPU_RLAR::LIMIT.val((logical_end as u32) >> 5)
360 + MPU_RLAR::PXN::Disable
361 + MPU_RLAR::ATTRINDX.val(0);
362
363 Some(CortexMRegion {
364 location: Some((region_start, region_start.wrapping_add(region_size))),
365 rbar_value,
366 rlar_value,
367 region_num,
368 })
369 }
370
371 fn empty(region_num: usize) -> CortexMRegion {
372 CortexMRegion {
373 location: None,
374 rbar_value: MPU_RBAR::BASE.val(0),
375 rlar_value: MPU_RLAR::ENABLE::CLEAR,
376 region_num,
377 }
378 }
379
380 fn overlaps(&self, other_start: *const u8, other_size: usize) -> bool {
381 let other_start = other_start as usize;
382 let other_end = other_start + other_size;
383
384 let (region_start, region_end) = match self.location {
385 Some((region_start, region_end)) => {
386 let region_start = region_start as usize;
387 let region_end = region_end as usize;
388 (region_start, region_end)
389 }
390 None => return false,
391 };
392
393 region_start < other_end && other_start < region_end
394 }
395}
396
impl<const NUM_REGIONS: usize> mpu::MPU for MPU<NUM_REGIONS> {
    type MpuConfig = CortexMConfig<NUM_REGIONS>;

    /// Enable the MPU for running userspace.
    ///
    /// ENABLE turns the MPU on; HFNMIENA is cleared so the MPU is bypassed
    /// during HardFault/NMI handlers; PRIVDEFENA is set so privileged
    /// (kernel) code falls back to the default memory map for addresses
    /// not covered by any region.
    fn enable_app_mpu(&self) {
        self.registers
            .ctrl
            .write(MPU_CTRL::ENABLE::SET + MPU_CTRL::HFNMIENA::CLEAR + MPU_CTRL::PRIVDEFENA::SET);
    }

    /// Disable the MPU. Full-register write: also clears HFNMIENA and
    /// PRIVDEFENA.
    fn disable_app_mpu(&self) {
        self.registers.ctrl.write(MPU_CTRL::ENABLE::CLEAR);
    }

    /// Number of regions the hardware implements, read from MPU_TYPE.
    fn number_total_regions(&self) -> usize {
        self.registers.mpu_type.read(MPU_TYPE::DREGION) as usize
    }

    /// Create a fresh, empty configuration with a unique id.
    ///
    /// Returns `None` only if the id counter would overflow.
    fn new_config(&self) -> Option<Self::MpuConfig> {
        let id = self.config_count.get();
        self.config_count.set(id.checked_add(1)?);

        // All slots start as region number 0 here; `reset_config` below
        // immediately renumbers each slot to its index.
        let mut ret = CortexMConfig {
            id,
            regions: [CortexMRegion::empty(0); NUM_REGIONS],
            is_dirty: Cell::new(true),
        };

        self.reset_config(&mut ret);

        Some(ret)
    }

    /// Reset every slot of `config` to an unused region carrying its own
    /// index as the hardware region number, and mark the config dirty.
    fn reset_config(&self, config: &mut Self::MpuConfig) {
        for i in 0..NUM_REGIONS {
            config.regions[i] = CortexMRegion::empty(i);
        }

        config.is_dirty.set(true);
    }

    /// Allocate a general-purpose MPU region inside the offered span of
    /// unallocated memory.
    ///
    /// Start and end are rounded up to the 32-byte hardware granule.
    /// Returns `None` if the span overlaps an existing region of this
    /// config, no region slot is free, or the aligned region does not fit
    /// in the offered span.
    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<mpu::Region> {
        // Closure so early exits can use `Err(())`/`?`; collapsed to
        // `None` at the end via `.ok()`.
        let mut region_calculation = || {
            // Never hand out memory that intersects an existing region.
            for region in config.regions.iter() {
                if region.overlaps(unallocated_memory_start, unallocated_memory_size) {
                    return Err(());
                }
            }

            let region_num = config.unused_region_number().ok_or(())?;

            // Round both ends of the region up to 32-byte boundaries.
            let region_start = align32(unallocated_memory_start)?;
            let region_end = align32(region_start.wrapping_add(min_region_size))?;
            // NOTE(review): `offset_from` is UB unless both pointers lie
            // within the same allocated object; this assumes callers pass
            // a span inside a single allocation — confirm at call sites.
            let region_size = unsafe { region_end.offset_from(region_start) };

            if region_size < 0 {
                return Err(());
            }

            // After alignment the region must still fit in the span we
            // were offered.
            if region_size as usize > unallocated_memory_size {
                return Err(());
            }

            // For plain regions the logical (accessible) span and the
            // backing allocation coincide.
            let region = CortexMRegion::new(
                region_start,
                region_size as usize,
                region_start,
                region_size as usize,
                region_num,
                permissions,
            )
            .ok_or(())?;

            config.regions[region_num] = region;
            config.is_dirty.set(true);

            Ok(mpu::Region::new(region_start, region_size as usize))
        };

        region_calculation().ok()
    }

    /// Remove a previously-allocated region from `config`.
    ///
    /// Fails if `region` matches no entry, or if it matches the app
    /// memory region (index `APP_MEMORY_REGION_MAX_NUM`), which may never
    /// be removed through this path.
    fn remove_memory_region(
        &self,
        region: mpu::Region,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        // Matching uses `PartialEq<mpu::Region> for CortexMRegion`:
        // identical start address and size.
        let (idx, _r) = config
            .regions
            .iter()
            .enumerate()
            .find(|(_idx, r)| **r == region)
            .ok_or(())?;

        if idx == APP_MEMORY_REGION_MAX_NUM {
            return Err(());
        }

        config.regions[idx] = CortexMRegion::empty(idx);
        config.is_dirty.set(true);

        Ok(())
    }

    /// Allocate the process memory region in slot 0.
    ///
    /// The full region covers at least `min_memory_size` and at least the
    /// initial app + kernel portions; only the initial app portion (the
    /// "logical" span) is made accessible to the process.
    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        let mut region_calculation = || {
            // Never hand out memory that intersects an existing region.
            for region in config.regions.iter() {
                if region.overlaps(unallocated_memory_start, unallocated_memory_size) {
                    return Err(());
                }
            }

            // Total process memory: the larger of the requested minimum
            // and the initial app + kernel break layout.
            let memory_size = cmp::max(
                min_memory_size,
                initial_app_memory_size + initial_kernel_memory_size,
            );

            // Round the whole region to 32-byte boundaries.
            let region_start = align32(unallocated_memory_start)?;
            let region_end = align32(region_start.wrapping_add(memory_size))?;
            // NOTE(review): see `allocate_region` — assumes one allocation.
            let region_size = unsafe { region_end.offset_from(region_start) };

            if region_size < 0 {
                return Err(());
            }

            if region_size as usize > unallocated_memory_size {
                return Err(());
            }

            // The accessible (app) span starts at the region start and
            // covers the initial app memory, rounded up to the granule.
            let logical_start = region_start;
            let logical_end = align32(logical_start.wrapping_add(initial_app_memory_size))?;
            let logical_size = unsafe { logical_end.offset_from(logical_start) };

            if logical_size < 0 {
                return Err(());
            }

            let region = CortexMRegion::new(
                logical_start,
                logical_size as usize,
                region_start,
                region_size as usize,
                0,
                permissions,
            )
            .ok_or(())?;

            // Slot 0 is reserved for the app memory region.
            config.regions[0] = region;
            config.is_dirty.set(true);

            // NOTE(review): this reports `memory_size`, while the backing
            // region spans `region_size` (which may be up to 31 bytes
            // larger after alignment) — confirm callers expect the
            // pre-alignment size.
            Ok((region_start, memory_size))
        };

        region_calculation().ok()
    }

    /// Re-derive the app memory region in slot 0 after the app or kernel
    /// memory break moved.
    ///
    /// Both breaks must lie inside the existing region, the app break may
    /// not exceed the kernel break, and the (rounded-up) accessible span
    /// may not reach into kernel-owned memory.
    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        permissions: mpu::Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        // Fails if the app memory region was never allocated.
        let (region_start, region_end) = config.regions[0].location.ok_or(())?;

        if (app_memory_break as usize) < (region_start as usize)
            || (app_memory_break as usize) >= (region_end as usize)
        {
            return Err(());
        }

        if (kernel_memory_break as usize) < (region_start as usize)
            || (kernel_memory_break as usize) >= (region_end as usize)
        {
            return Err(());
        }

        // App memory grows up, kernel memory grows down; they must not
        // have crossed.
        if (app_memory_break as usize) > (kernel_memory_break as usize) {
            return Err(());
        }

        // Accessible span: region start up to the app break, rounded up
        // to the 32-byte granule.
        let logical_start = region_start;
        let logical_end = align32(app_memory_break)?;
        let logical_size = unsafe { logical_end.offset_from(logical_start) };

        if logical_size < 0 {
            return Err(());
        }

        // Rounding up must not make kernel-owned memory accessible.
        if (logical_end as usize) > (kernel_memory_break as usize) {
            return Err(());
        }

        let region_size = unsafe { region_end.offset_from(region_start) };

        let region = CortexMRegion::new(
            logical_start,
            logical_size as usize,
            region_start,
            region_size as usize,
            0,
            permissions,
        )
        .ok_or(())?;

        config.regions[0] = region;
        config.is_dirty.set(true);

        Ok(())
    }

    /// Write `config` to the hardware, unless it is already loaded and
    /// unchanged.
    fn configure_mpu(&self, config: &Self::MpuConfig) {
        // Program memory attribute 0 (used by every region via
        // ATTRINDX = 0). 0b0100_0100 encodes Normal memory,
        // Inner/Outer Non-cacheable in the MAIR attribute encoding.
        self.registers
            .mair0
            .modify(MPU_MAIR0::ATTR0.val(0b0100_0100));
        // Skip the register writes if the hardware already holds this
        // exact configuration and nothing changed since.
        if !self.hardware_is_configured_for.contains(&config.id) || config.is_dirty.get() {
            for region in config.regions.iter() {
                // Select the region number, then write its base/limit.
                self.registers
                    .rnr
                    .modify(MPU_RNR::REGION.val(region.region_num as u32));
                self.registers.rbar.write(region.rbar_value);
                self.registers.rlar.write(region.rlar_value);
            }
            self.hardware_is_configured_for.set(config.id);
            config.is_dirty.set(false);
        }
    }
}