//! Paging-based memory protection for 32-bit x86.
//!
//! This module implements the Tock `MPU` interface on top of the x86 paging
//! hardware: a page directory identity-maps the address space with 4 MiB
//! pages, and a single page table provides 4 KiB granularity for the first
//! 4 MiB, where per-process protection regions are tracked.

use crate::registers::bits32::paging::{
    PAddr, PDEntry, PTEntry, PTFlags, PD, PDFLAGS, PT, PTFLAGS,
};
use crate::registers::controlregs::{self, CR0, CR4};
use crate::registers::tlb;
use core::{cmp, fmt, mem};
use kernel::platform::mpu::{Permissions, Region, MPU};
use kernel::utilities::cells::MapCell;
use tock_registers::LocalRegisterCopy;

use core::cell::RefCell;
/// Number of entries in a page table (and in a page directory).
const MAX_PTE_ENTRY: usize = 1024;
/// A 4 KiB page covers 2^12 bytes.
const PAGE_BITS_4K: usize = 12;
const PAGE_SIZE_4K: usize = 1 << PAGE_BITS_4K;
/// Size of the region mapped by one large (PS-bit) page directory entry.
const PAGE_SIZE_4M: usize = 0x400000;
/// Maximum number of protection regions tracked per process.
const MAX_REGIONS: usize = 8;
/// Masks a page number down to its index within the page table.
const PAGE_TABLE_MASK: usize = MAX_PTE_ENTRY - 1;

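/// A single allocated protection region, described by its position in the
/// page table and the PTE flag values that activate or deactivate it: see
/// `configure_mpu`, which writes `flags_set` while the owning process runs
/// and `flags_clear` when it is swapped out.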
#[derive(Copy, Clone)]
struct AllocateRegion {
    start_index_page: usize,
    pages: usize,
    flags_set: PTFlags,
    flags_clear: PTFlags,
}

#[derive(Copy, Clone)]
struct PageTableConfig {
    start_ram_section: usize,
    ram_pages: usize,
    start_app_section: usize,
    app_pages: usize,
    /// Last page number owned by the process (app RAM plus kernel-owned pages).
    last_page_owned: usize,
    /// First page of the kernel-owned portion of process memory.
    kernel_first_page: usize,
    /// Index into `alloc_regions` of the process RAM region.
    app_ram_region: usize,
    alloc_regions: [Option<AllocateRegion>; MAX_REGIONS],
}

impl PageTableConfig {
    pub fn new() -> Self {
        Self {
            start_ram_section: 0,
            ram_pages: 0,
            start_app_section: 0,
            app_pages: 0,
            last_page_owned: 0,
            kernel_first_page: 0,
            app_ram_region: 0,
            alloc_regions: [None; MAX_REGIONS],
        }
    }
    pub fn set_app(&mut self, start: usize, pages: usize) {
        self.start_app_section = start;
        self.app_pages = pages;
    }
    pub fn set_ram(&mut self, start: usize, pages: usize) {
        self.start_ram_section = start;
        self.ram_pages = pages;
    }
    pub fn get_ram(&self) -> usize {
        self.start_ram_section
    }
}

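/// Per-process MPU configuration: the regions allocated so far, plus the
/// page-table bookkeeping needed to activate them in `configure_mpu`.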
pub struct MemoryProtectionConfig {
    num_regions: usize,
    ram_regions: usize,
    page_information: PageTableConfig,
}

impl Default for MemoryProtectionConfig {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryProtectionConfig {
    pub fn new() -> Self {
        Self {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        }
    }
}

impl fmt::Display for MemoryProtectionConfig {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f)?;
        writeln!(f, " Paging Configuration:")?;

        writeln!(
            f,
            " Total regions: {:10} RAM regions: {:10}",
            self.num_regions, self.ram_regions
        )?;

        let flash_start = self.page_information.start_app_section * PAGE_SIZE_4K;
        let flash_length = self.page_information.app_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            " Flash start: {:#010x} Length: {:#10x}",
            flash_start, flash_length
        )?;

        let ram_start = self.page_information.start_ram_section * PAGE_SIZE_4K;
        let ram_length = self.page_information.ram_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            " RAM start: {:#010x} Length: {:#10x}",
            ram_start, ram_length
        )?;

        let kernel_start = self.page_information.kernel_first_page * PAGE_SIZE_4K;
        let kernel_length = (self.page_information.last_page_owned + 1
            - self.page_information.kernel_first_page)
            * PAGE_SIZE_4K;
        writeln!(
            f,
            " Kernel start: {:#010x} Length: {:#10x}",
            kernel_start, kernel_length
        )?;
        writeln!(f)?;

        Ok(())
    }
}

pub struct PagingMPU<'a> {
    num_regions: usize,
    config_pages: MapCell<PageTableConfig>,
    /// Physical address of the page directory (loaded into CR3).
    page_dir_paddr: usize,
    /// Physical address of the page table referenced by the first directory entry.
    page_table_paddr: usize,
    pd: RefCell<&'a mut PD>,
    pt: RefCell<&'a mut PT>,
}

/// Returns the 4 KiB page number containing `memory_address`.
fn calc_page_index(memory_address: usize) -> usize {
    memory_address / PAGE_SIZE_4K
}

/// Returns the number of 4 KiB pages needed to hold `memory_size` bytes,
/// rounding up to the next whole page.
fn calc_alloc_pages(memory_size: usize) -> usize {
    memory_size.next_multiple_of(PAGE_SIZE_4K) / PAGE_SIZE_4K
}
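
// Illustrative values (not from the original source): a 5 KiB request spans
// two 4 KiB pages, so calc_alloc_pages(5 * 1024) == 2, and the page holding
// address 0x0040_1000 is calc_page_index(0x0040_1000) == 0x401.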

impl<'a> PagingMPU<'a> {
    /// Creates a new paging MPU wrapping the given page directory and page table.
    ///
    /// # Safety
    ///
    /// `page_dir_paddr` and `page_table_paddr` must be the physical addresses
    /// of `page_dir` and `page_table`, since they are programmed into CR3 and
    /// the first page directory entry respectively.
    pub unsafe fn new(
        page_dir: &'a mut PD,
        page_dir_paddr: usize,
        page_table: &'a mut PT,
        page_table_paddr: usize,
    ) -> Self {
        let page_dir = RefCell::new(page_dir);
        let page_table = RefCell::new(page_table);

        Self {
            num_regions: 0,
            config_pages: MapCell::empty(),
            page_dir_paddr,
            page_table_paddr,
            pd: page_dir,
            pt: page_table,
        }
    }

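    /// Runs `closure` over every page table entry, passing the entry index
    /// alongside a mutable reference to the entry.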
    pub unsafe fn iterate_pt<C>(&self, mut closure: C)
    where
        C: FnMut(usize, &mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        for (n, entry) in page_table.iter_mut().enumerate() {
            closure(n, entry);
        }
    }

    pub fn pt_from_addr<C>(&self, mut closure: C, virtual_addr: usize)
    where
        C: FnMut(&mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        // Convert the address to a page number, then mask it down to an index
        // within the single page table managed here.
        let mut page_index = virtual_addr >> PAGE_BITS_4K;
        page_index &= PAGE_TABLE_MASK;

        closure(&mut page_table[page_index]);
    }

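    /// Builds the initial identity mapping: every page directory entry maps a
    /// 4 MiB superpage (PS set) onto the same physical range, then the first
    /// entry is redirected to the 4 KiB page table, which in turn
    /// identity-maps the first 4 MiB with kernel-only read/write entries.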
    pub unsafe fn initialize_page_tables(&self) {
        let mut page_directory = self.pd.borrow_mut();

        // Identity-map the whole address space with 4 MiB pages.
        for (n, entry) in page_directory.iter_mut().enumerate() {
            let mut entry_flags = LocalRegisterCopy::new(0);
            entry_flags.write(PDFLAGS::PS::SET + PDFLAGS::RW::SET + PDFLAGS::P::SET);
            *entry = PDEntry::new(PAddr::from(PAGE_SIZE_4M * n), entry_flags);
        }

        // Point the first directory entry at the 4 KiB page table instead.
        let mut page_directory_flags = LocalRegisterCopy::new(0);
        page_directory_flags.write(PDFLAGS::P::SET + PDFLAGS::RW::SET + PDFLAGS::US::SET);
        page_directory[0] = PDEntry::new(PAddr::from(self.page_table_paddr), page_directory_flags);

        // Identity-map the first 4 MiB with kernel-only 4 KiB pages.
        let mut page_table = self.pt.borrow_mut();
        let mut page_table_flags = LocalRegisterCopy::new(0);
        page_table_flags.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
        for (n, entry) in page_table.iter_mut().enumerate() {
            *entry = PTEntry::new(PAddr::from(PAGE_SIZE_4K * n), page_table_flags);
        }
    }

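    /// Enables paging: sets CR4.PSE so the 4 MiB directory entries are valid,
    /// loads the page directory's physical address into CR3, and finally sets
    /// CR0.PG to turn translation on.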
    unsafe fn enable_paging(&self) {
        // 4 MiB (PS-bit) pages require CR4.PSE when PAE is not in use.
        let mut cr4_value = unsafe { controlregs::cr4() };
        if !cr4_value.is_set(CR4::CR4_ENABLE_PSE) {
            cr4_value.modify(CR4::CR4_ENABLE_PSE::SET);
            unsafe {
                controlregs::cr4_write(cr4_value);
            }
        }

        unsafe {
            controlregs::cr3_write(self.page_dir_paddr as u64);

            let mut cr0_value = controlregs::cr0();
            cr0_value.modify(CR0::CR0_ENABLE_PAGING::SET);
            controlregs::cr0_write(cr0_value);
        }
    }

    /// Sets up the identity-mapped page tables and turns paging on.
    pub fn init(&self) {
        unsafe {
            self.initialize_page_tables();
            self.enable_paging();
        }
    }
}

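// A minimal bring-up sketch, assuming the board provides page-aligned `PD`
// and `PT` structures and knows their physical addresses (the names below
// are illustrative, not part of this module):
//
//     // SAFETY: pd/pt live in identity-mapped memory, so pd_paddr/pt_paddr
//     // are their true physical addresses on this platform.
//     let mpu = unsafe { PagingMPU::new(pd, pd_paddr, pt, pt_paddr) };
//     mpu.init();
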
impl fmt::Display for PagingMPU<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Num_regions: {:?}, ...", self.num_regions)
    }
}

unsafe impl MPU for PagingMPU<'_> {
    type MpuConfig = MemoryProtectionConfig;

    fn new_config(&self) -> Option<Self::MpuConfig> {
        Some(MemoryProtectionConfig {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        config.num_regions = 0;
        config.ram_regions = 0;
        config.page_information = PageTableConfig::new();
    }

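    // Paging stays enabled at all times; process isolation is achieved by
    // rewriting page table entries in `configure_mpu`, so there is nothing to
    // do when "enabling" or "disabling" the MPU around a process switch.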
    fn enable_app_mpu(&self) {}

    unsafe fn disable_app_mpu(&self) {}

    fn number_total_regions(&self) -> usize {
        mem::size_of::<PT>() / mem::size_of::<PTEntry>()
    }

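    // Allocation strategy: round the start address up to a page boundary,
    // round the requested size up to whole pages, and record the region with
    // PTE flags derived from `permissions`. If an identical region already
    // exists it is returned as-is rather than duplicated.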
    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<Region> {
        let aligned_address_start: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let page_index: usize = calc_page_index(aligned_address_start);

        let pages_alloc_requested: usize = calc_alloc_pages(min_region_size);

        let total_page_aligned_size: usize = pages_alloc_requested * PAGE_SIZE_4K;

        if aligned_address_start + total_page_aligned_size
            > unallocated_memory_start as usize + unallocated_memory_size
        {
            return None;
        }

        // If an identical region was already allocated, simply return it.
        for r in config.page_information.alloc_regions.iter().flatten() {
            if r.start_index_page == page_index && r.pages == pages_alloc_requested {
                return Some(Region::new(
                    aligned_address_start as *const u8,
                    total_page_aligned_size,
                ));
            }
        }

        // Flags applied while the owning process runs. Legacy 32-bit paging
        // has no NX bit, so execute permissions cannot be restricted
        // independently: writable regions become RW+US, everything else a
        // read-only user mapping.
        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute | Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly | Permissions::ReadOnly | Permissions::ExecuteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET)
            }
        }

        // Flags applied while the process is swapped out: the mapping stays
        // present (kernel-accessible), keeping write access only for
        // read/write regions.
        let mut pages_clear = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteOnly => pages_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET),
            _ => pages_clear.write(PTFLAGS::P::SET),
        }

        if page_index > MAX_PTE_ENTRY || page_index + pages_alloc_requested > MAX_PTE_ENTRY {
            return None;
        }

        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_start + total_page_aligned_size - 1;
        if calc_page_index(end_of_allocated_memory) > calc_page_index(end_of_unallocated_memory) {
            None
        } else {
            // Store the region in the first free slot, failing if all
            // MAX_REGIONS slots are taken.
            let index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none());

            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = Some(AllocateRegion {
                        flags_set: pages_attr,
                        flags_clear: pages_clear,
                        start_index_page: page_index,
                        pages: pages_alloc_requested,
                    });
                }
                None => return None,
            }

            // Apply the flags to the pages [page_index, last_page). Note the
            // exclusive upper bound: `last_page` is the first page *after*
            // the region, so it must not be written.
            let last_page = page_index + pages_alloc_requested;

            let mut sram_page_table = self.pt.borrow_mut();

            for current_page in page_index..last_page {
                sram_page_table[current_page] =
                    PTEntry::new(sram_page_table[current_page].address(), pages_attr);
                config.num_regions += 1;
            }

            config
                .page_information
                .set_app(page_index, config.num_regions);

            Some(Region::new(
                aligned_address_start as *const u8,
                total_page_aligned_size,
            ))
        }
    }

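    // Removing a region restores its pages to kernel-only (present, not
    // user-accessible) mappings and invalidates the stale TLB entry for each
    // affected page.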
    fn remove_memory_region(&self, region: Region, config: &mut Self::MpuConfig) -> Result<(), ()> {
        unsafe {
            let start_page = calc_page_index(region.start_address() as usize);
            let last_page = start_page + calc_alloc_pages(region.size());

            // Find the matching region; fail if it was never allocated.
            let index = config.page_information.alloc_regions.iter().position(|r| {
                if let Some(r) = r {
                    if r.start_index_page == start_page && r.pages == last_page - start_page {
                        return true;
                    }
                }
                false
            });

            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = None;
                }
                None => return Err(()),
            }

            let mut sram_page_table = self.pt.borrow_mut();
            let mut sram_page_table_flags = LocalRegisterCopy::new(0);
            sram_page_table_flags.write(PTFLAGS::P::SET);
            // `last_page` is the first page past the region, hence the
            // exclusive range.
            for page_index in start_page..last_page {
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), sram_page_table_flags);

                // Invalidate the TLB entry for the page just downgraded.
                let inv_page = page_index * PAGE_SIZE_4K;
                tlb::flush(inv_page);
                config.num_regions -= 1;
            }
        }
        Ok(())
    }

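    // Process memory is laid out with app-owned RAM at the bottom and
    // kernel-owned (grant) pages at the top. Only the app-owned pages are
    // remapped user-accessible here; the kernel-owned pages keep their
    // kernel-only mappings from initialization.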
    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        let aligned_address_app: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let last_unallocated_memory: usize =
            (unallocated_memory_start as usize) + unallocated_memory_size;
        let start_mem_page: usize = calc_page_index(aligned_address_app);

        let last_page_app_mem: usize = calc_page_index(last_unallocated_memory);

        // Round all sizes up to whole pages.
        let aligned_app_mem_size: usize = initial_app_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_kernel_mem_size: usize =
            initial_kernel_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_min_mem_size: usize = min_memory_size.next_multiple_of(PAGE_SIZE_4K);

        // See `allocate_region` for why execute permissions collapse into the
        // read/write distinction on 32-bit paging without NX.
        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute | Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly | Permissions::ReadOnly | Permissions::ExecuteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET)
            }
        }

        // The block must hold the kernel-owned pages plus the larger of the
        // requested minimum and the initial app memory.
        let total_memory_size = cmp::max(
            aligned_min_mem_size + aligned_kernel_mem_size,
            aligned_app_mem_size + aligned_kernel_mem_size,
        );
        let pages_alloc_requested: usize = calc_alloc_pages(total_memory_size);
        let kernel_alloc_pages: usize = calc_alloc_pages(aligned_kernel_mem_size);

        if start_mem_page > MAX_PTE_ENTRY || start_mem_page + pages_alloc_requested > MAX_PTE_ENTRY
        {
            return None;
        }
        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_app + total_memory_size;
        if end_of_allocated_memory > end_of_unallocated_memory {
            None
        } else {
            // Take the first free region slot, or fail if none is left.
            let allocate_index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none())?;

            let mut alloc_regions_flags_clear = LocalRegisterCopy::new(0);
            alloc_regions_flags_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
            config.page_information.alloc_regions[allocate_index] = Some(AllocateRegion {
                flags_set: pages_attr,
                flags_clear: alloc_regions_flags_clear,
                start_index_page: start_mem_page,
                pages: calc_alloc_pages(aligned_app_mem_size),
            });

            // Map only the app-owned pages; the exclusive bound leaves the
            // kernel-owned pages above them untouched.
            let last_page = start_mem_page + calc_alloc_pages(aligned_app_mem_size);
            let mut sram_page_table = self.pt.borrow_mut();
            for page_index in start_mem_page..last_page {
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), pages_attr);
                config.ram_regions += 1;
            }

            config
                .page_information
                .set_ram(start_mem_page, config.ram_regions);
            config.page_information.last_page_owned = last_page_app_mem;
            config.page_information.kernel_first_page = last_page_app_mem - kernel_alloc_pages;
            config.page_information.app_ram_region = allocate_index;
            Some((aligned_address_app as *const u8, total_memory_size))
        }
    }

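    // Tracks the moving app/kernel memory break: the app break may only grow
    // into pages below the kernel break, and the kernel break must stay
    // within the pages this process already owns.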
    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        _permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let page_in_app_break = calc_page_index(app_memory_break as usize);

        let page_in_kernel_break = calc_page_index(kernel_memory_break as usize);

        let last_page_currently =
            config.page_information.get_ram() + config.page_information.ram_pages - 1;
        let num_of_ram_pages = page_in_app_break - config.page_information.get_ram();

        // The app break may never cross the kernel break, and the kernel
        // break may not move past the pages owned by this process.
        if (app_memory_break as usize) > (kernel_memory_break as usize)
            || (page_in_app_break >= page_in_kernel_break)
            || page_in_kernel_break > config.page_information.last_page_owned
        {
            return Err(());
        }
        // Only rewrite the bookkeeping when a break actually moved to a
        // different page.
        if last_page_currently != page_in_app_break
            || num_of_ram_pages != config.page_information.ram_pages
        {
            if let Some(r) = config.page_information.alloc_regions
                [config.page_information.app_ram_region]
                .as_mut()
            {
                r.pages = num_of_ram_pages;
            }
            config.page_information.ram_pages = num_of_ram_pages;
            config.page_information.kernel_first_page = page_in_kernel_break;
        }

        Ok(())
    }

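    // Context-switch path: first revoke the previously loaded configuration
    // by writing each of its regions' `flags_clear`, then install the
    // incoming process's regions with their `flags_set`, flushing the TLB
    // after each pass so no stale translations survive.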
    unsafe fn configure_mpu(&self, config: &Self::MpuConfig) {
        self.config_pages.map(|current_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in current_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    // Exclusive bound: the region occupies exactly `r.pages`
                    // pages starting at `init_region_page`.
                    let last_region_page = init_region_page + r.pages;
                    for page_index in init_region_page..last_region_page {
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_clear);
                    }
                }

                tlb::flush_all();
            }
        });
        self.config_pages.put(config.page_information);

        self.config_pages.map(|app_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in app_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    let last_region_page = init_region_page + r.pages;
                    for page_index in init_region_page..last_region_page {
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_set);
                    }
                }
                tlb::flush_all();
            }
        });
    }
}