use crate::registers::bits32::paging::{
    PAddr, PDEntry, PTEntry, PTFlags, PD, PDFLAGS, PT, PTFLAGS,
};
use crate::registers::controlregs::{self, CR0, CR4};
use crate::registers::tlb;
use core::{cmp, fmt, mem};
use kernel::platform::mpu::{Permissions, Region, MPU};
use kernel::utilities::cells::MapCell;
use tock_registers::LocalRegisterCopy;

use core::cell::RefCell;

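// 32-bit x86 paging constants: tables hold 1024 entries; pages are 4 KiB
// (page-table mapped) or 4 MiB (page-directory mapped with PSE).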
const MAX_PTE_ENTRY: usize = 1024;
const PAGE_BITS_4K: usize = 12;
const PAGE_SIZE_4K: usize = 1 << PAGE_BITS_4K;
const PAGE_SIZE_4M: usize = 0x400000;
const MAX_REGIONS: usize = 8;
const PAGE_TABLE_MASK: usize = MAX_PTE_ENTRY - 1;

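/// A contiguous run of 4 KiB pages together with the page-table flags to
/// apply while the owning process is running (`flags_set`) and while it is
/// not (`flags_clear`).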
#[derive(Copy, Clone)]
struct AllocateRegion {
    start_index_page: usize,
    pages: usize,
    flags_set: PTFlags,
    flags_clear: PTFlags,
}

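/// Page-granular bookkeeping for one process: where its flash (app) and RAM
/// sections start, how many pages each spans, which pages the kernel keeps
/// for itself, and the set of allocated regions.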
#[derive(Copy, Clone)]
struct PageTableConfig {
    start_ram_section: usize,
    ram_pages: usize,
    start_app_section: usize,
    app_pages: usize,
    last_page_owned: usize,
    kernel_first_page: usize,
    app_ram_region: usize,
    alloc_regions: [Option<AllocateRegion>; MAX_REGIONS],
}

impl PageTableConfig {
    pub fn new() -> Self {
        Self {
            start_ram_section: 0,
            ram_pages: 0,
            start_app_section: 0,
            app_pages: 0,
            last_page_owned: 0,
            kernel_first_page: 0,
            app_ram_region: 0,
            alloc_regions: [None; MAX_REGIONS],
        }
    }

    pub fn set_app(&mut self, start: usize, sections: usize) {
        self.start_app_section = start;
        self.app_pages = sections;
    }

    pub fn set_ram(&mut self, start: usize, sections: usize) {
        self.start_ram_section = start;
        self.ram_pages = sections;
    }

    pub fn get_ram(&self) -> usize {
        self.start_ram_section
    }
}

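/// Per-process MPU configuration: counters for the allocated regions plus
/// the page-table bookkeeping used to program the hardware.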
pub struct MemoryProtectionConfig {
    num_regions: usize,
    ram_regions: usize,
    page_information: PageTableConfig,
}

impl Default for MemoryProtectionConfig {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryProtectionConfig {
    pub fn new() -> Self {
        Self {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        }
    }
}

impl fmt::Display for MemoryProtectionConfig {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f)?;
        writeln!(f, " Paging Configuration:")?;

        writeln!(
            f,
            " Total regions: {:10} RAM regions: {:10}",
            self.num_regions, self.ram_regions
        )?;

        let flash_start = self.page_information.start_app_section * PAGE_SIZE_4K;
        let flash_length = self.page_information.app_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            " Flash start: {:#010x} Length: {:#10x}",
            flash_start, flash_length
        )?;

        let ram_start = self.page_information.start_ram_section * PAGE_SIZE_4K;
        let ram_length = self.page_information.ram_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            " RAM start: {:#010x} Length: {:#10x}",
            ram_start, ram_length
        )?;

        let kernel_start = self.page_information.kernel_first_page * PAGE_SIZE_4K;
        let kernel_length = (self.page_information.last_page_owned + 1
            - self.page_information.kernel_first_page)
            * PAGE_SIZE_4K;
        writeln!(
            f,
            " Kernel start: {:#010x} Length: {:#10x}",
            kernel_start, kernel_length
        )?;
        writeln!(f)?;

        Ok(())
    }
}

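/// Implements the `MPU` interface on 32-bit x86 using paging: a page
/// directory of 4 MiB entries plus a single page table covering the low
/// 4 MiB, where per-process permissions are applied page by page.
///
/// A minimal construction sketch (the `PAGE_DIR`/`PAGE_TABLE` statics and
/// the `*_PADDR` physical addresses are hypothetical, board-provided
/// values):
///
/// ```rust,ignore
/// let mpu = unsafe {
///     PagingMPU::new(
///         &mut *core::ptr::addr_of_mut!(PAGE_DIR),
///         PAGE_DIR_PADDR,
///         &mut *core::ptr::addr_of_mut!(PAGE_TABLE),
///         PAGE_TABLE_PADDR,
///     )
/// };
/// mpu.init(); // build the identity mapping and enable paging
/// ```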
pub struct PagingMPU<'a> {
    num_regions: usize,
    config_pages: MapCell<PageTableConfig>,
    page_dir_paddr: usize,
    page_table_paddr: usize,
    pd: RefCell<&'a mut PD>,
    pt: RefCell<&'a mut PT>,
}

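/// Returns the index of the 4 KiB page containing `memory_address`.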
fn calc_page_index(memory_address: usize) -> usize {
    memory_address / PAGE_SIZE_4K
}

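/// Returns how many whole 4 KiB pages are needed to hold `memory_size`
/// bytes (the size divided by the page size, rounded up).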
fn calc_alloc_pages(memory_size: usize) -> usize {
    memory_size.div_ceil(PAGE_SIZE_4K)
}

impl<'a> PagingMPU<'a> {
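    /// Creates a paging-based MPU from a page directory and page table.
    ///
    /// # Safety
    ///
    /// `page_dir_paddr` and `page_table_paddr` must be the physical
    /// addresses of `page_dir` and `page_table`; `page_dir_paddr` is loaded
    /// into CR3 when paging is enabled.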
    pub unsafe fn new(
        page_dir: &'a mut PD,
        page_dir_paddr: usize,
        page_table: &'a mut PT,
        page_table_paddr: usize,
    ) -> Self {
        let page_dir = RefCell::new(page_dir);
        let page_table = RefCell::new(page_table);

        Self {
            num_regions: 0,
            config_pages: MapCell::empty(),
            page_dir_paddr,
            page_table_paddr,
            pd: page_dir,
            pt: page_table,
        }
    }

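    /// Runs `closure` over every page-table entry along with its index.
    ///
    /// # Safety
    ///
    /// The closure can rewrite live page-table entries, so it must keep the
    /// mappings the kernel currently relies on intact.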
    pub unsafe fn iterate_pt<C>(&self, mut closure: C)
    where
        C: FnMut(usize, &mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        for (n, entry) in page_table.iter_mut().enumerate() {
            closure(n, entry);
        }
    }

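    /// Runs `closure` on the page-table entry that maps `virtual_addr`.
    /// Only the low 4 MiB are covered by this table, so the index is masked
    /// to the table's 1024 entries.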
    pub fn pt_from_addr<C>(&self, mut closure: C, virtual_addr: usize)
    where
        C: FnMut(&mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        let mut page_index = virtual_addr >> PAGE_BITS_4K;
        page_index &= PAGE_TABLE_MASK;

        closure(&mut page_table[page_index]);
    }

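    /// Builds the initial identity mapping: 4 MiB supervisor pages for the
    /// whole address space, with the first 4 MiB instead routed through the
    /// page table so per-page process permissions can be applied there.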
    pub unsafe fn initialize_page_tables(&self) {
        let mut page_directory = self.pd.borrow_mut();

        // Identity-map the address space with 4 MiB pages: present and
        // writable, but not user-accessible.
        for (n, entry) in page_directory.iter_mut().enumerate() {
            let mut entry_flags = LocalRegisterCopy::new(0);
            entry_flags.write(PDFLAGS::PS::SET + PDFLAGS::RW::SET + PDFLAGS::P::SET);
            *entry = PDEntry::new(PAddr::from(PAGE_SIZE_4M * n), entry_flags);
        }

        // The first directory entry instead points at the 4 KiB page table;
        // user access is then controlled per page at the PTE level.
        let mut page_directory_flags = LocalRegisterCopy::new(0);
        page_directory_flags.write(PDFLAGS::P::SET + PDFLAGS::RW::SET + PDFLAGS::US::SET);
        page_directory[0] = PDEntry::new(PAddr::from(self.page_table_paddr), page_directory_flags);

        // Identity-map the first 4 MiB with kernel-only 4 KiB pages.
        let mut page_table = self.pt.borrow_mut();
        let mut page_table_flags = LocalRegisterCopy::new(0);
        page_table_flags.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
        for (n, entry) in page_table.iter_mut().enumerate() {
            *entry = PTEntry::new(PAddr::from(PAGE_SIZE_4K * n), page_table_flags);
        }
    }

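    /// Enables PSE (required for the 4 MiB pages), points CR3 at the page
    /// directory, and sets the paging bit in CR0.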
    unsafe fn enable_paging(&self) {
        // 4 MiB pages require the PSE bit in CR4.
        let mut cr4_value = unsafe { controlregs::cr4() };
        if !cr4_value.is_set(CR4::CR4_ENABLE_PSE) {
            cr4_value.modify(CR4::CR4_ENABLE_PSE::SET);
            unsafe {
                controlregs::cr4_write(cr4_value);
            }
        }

        unsafe {
            // Point CR3 at the page directory, then turn paging on in CR0.
            controlregs::cr3_write(self.page_dir_paddr as u64);

            let mut cr0_value = controlregs::cr0();
            cr0_value.modify(CR0::CR0_ENABLE_PAGING::SET);
            controlregs::cr0_write(cr0_value);
        }
    }

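    /// One-shot setup: build the page tables, then turn paging on.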
    pub fn init(&self) {
        unsafe {
            self.initialize_page_tables();
            self.enable_paging();
        }
    }
}

impl fmt::Display for PagingMPU<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Num_regions: {}", self.num_regions)
    }
}

impl MPU for PagingMPU<'_> {
    type MpuConfig = MemoryProtectionConfig;

    fn new_config(&self) -> Option<Self::MpuConfig> {
        Some(MemoryProtectionConfig {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        config.num_regions = 0;
        config.ram_regions = 0;
        config.page_information = PageTableConfig::new();
    }

    // Paging is enabled globally at boot; per-app enforcement happens via
    // the per-page flags installed in configure_mpu().
    fn enable_app_mpu(&self) {}

    fn disable_app_mpu(&self) {}

    fn number_total_regions(&self) -> usize {
        // One "region" per page-table entry.
        mem::size_of::<PT>() / mem::size_of::<PTEntry>()
    }

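    // Allocates a page-aligned region out of `unallocated_memory`, records
    // it in `config`, and applies its "running" flags to the page table.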
    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<Region> {
        // Round the start address up to a page boundary and the requested
        // size up to a whole number of pages.
        let aligned_address_start: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let page_index: usize = calc_page_index(aligned_address_start);

        let pages_alloc_requested: usize = calc_alloc_pages(min_region_size);

        let total_page_aligned_size: usize = pages_alloc_requested * PAGE_SIZE_4K;

        if aligned_address_start + total_page_aligned_size
            > unallocated_memory_start as usize + unallocated_memory_size
        {
            return None;
        }

        // If this exact region was already allocated, simply return it again.
        for r in config.page_information.alloc_regions.iter().flatten() {
            if r.start_index_page == page_index && r.pages == pages_alloc_requested {
                return Some(Region::new(
                    aligned_address_start as *const u8,
                    total_page_aligned_size,
                ));
            }
        }

        // Flags applied while the owning process is running.
        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ReadOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
        }

        // Flags applied while the owning process is not running.
        let mut pages_clear = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteOnly => pages_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET),
            _ => pages_clear.write(PTFLAGS::P::SET),
        }

        if page_index >= MAX_PTE_ENTRY || page_index + pages_alloc_requested > MAX_PTE_ENTRY {
            return None;
        }

        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_start + total_page_aligned_size - 1;
        if calc_page_index(end_of_allocated_memory) > calc_page_index(end_of_unallocated_memory) {
            None
        } else {
            let index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none());

            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = Some(AllocateRegion {
                        flags_set: pages_attr,
                        flags_clear: pages_clear,
                        start_index_page: page_index,
                        pages: pages_alloc_requested,
                    });
                }
                None => return None,
            }

            let last_page = page_index + pages_alloc_requested;

            let mut sram_page_table = self.pt.borrow_mut();

            // `last_page` is one past the final owned page, so the range is
            // exclusive; an inclusive range would touch one extra entry and
            // could index past the end of the page table.
            for current_page in page_index..last_page {
                sram_page_table[current_page] =
                    PTEntry::new(sram_page_table[current_page].address(), pages_attr);
                config.num_regions += 1;
            }

            config
                .page_information
                .set_app(page_index, config.num_regions);

            Some(Region::new(
                aligned_address_start as *const u8,
                total_page_aligned_size,
            ))
        }
    }

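    // Drops a previously allocated region: forgets its bookkeeping entry,
    // reverts its pages to kernel-only, and flushes the affected TLB lines.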
    fn remove_memory_region(&self, region: Region, config: &mut Self::MpuConfig) -> Result<(), ()> {
        unsafe {
            let start_page = calc_page_index(region.start_address() as usize);
            let last_page = start_page + calc_alloc_pages(region.size());

            // Find the matching allocation record.
            let index = config.page_information.alloc_regions.iter().position(|r| {
                if let Some(r) = r {
                    if r.start_index_page == start_page && r.pages == last_page - start_page {
                        return true;
                    }
                }
                false
            });

            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = None;
                }
                None => return Err(()),
            }

            // Mark the pages kernel-only again and flush their stale TLB
            // entries. The range is exclusive: `last_page` is one past the
            // final owned page.
            let mut sram_page_table = self.pt.borrow_mut();
            for page_index in start_page..last_page {
                let mut sram_page_table_flags = LocalRegisterCopy::new(0);
                sram_page_table_flags.write(PTFLAGS::P::SET);
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), sram_page_table_flags);

                let inv_page = page_index * PAGE_SIZE_4K;
                tlb::flush(inv_page);
                config.num_regions -= 1;
            }
        }
        Ok(())
    }

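    // Carves out the process RAM region: app-owned pages at the bottom
    // (mapped with the requested permissions) and kernel-owned pages at the
    // top, which stay inaccessible to the process.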
    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        // Page-align the start address and all requested sizes.
        let aligned_address_app: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let last_unallocated_memory: usize =
            (unallocated_memory_start as usize) + unallocated_memory_size;
        let start_mem_page: usize = calc_page_index(aligned_address_app);

        let last_page_app_mem: usize = calc_page_index(last_unallocated_memory);

        let aligned_app_mem_size: usize = initial_app_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_kernel_mem_size: usize =
            initial_kernel_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_min_mem_size: usize = min_memory_size.next_multiple_of(PAGE_SIZE_4K);

        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ReadOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
        }

        // The region must hold at least the requested minimum plus the
        // kernel-owned memory at the top.
        let total_memory_size = cmp::max(
            aligned_min_mem_size + aligned_kernel_mem_size,
            aligned_app_mem_size + aligned_kernel_mem_size,
        );
        let pages_alloc_requested: usize = calc_alloc_pages(total_memory_size);
        let kernel_alloc_pages: usize = calc_alloc_pages(aligned_kernel_mem_size);

        if start_mem_page >= MAX_PTE_ENTRY
            || start_mem_page + pages_alloc_requested > MAX_PTE_ENTRY
        {
            return None;
        }

        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_app + total_memory_size;
        if end_of_allocated_memory > end_of_unallocated_memory {
            None
        } else {
            let allocate_index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none())?;

            let mut alloc_regions_flags_clear = LocalRegisterCopy::new(0);
            alloc_regions_flags_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
            config.page_information.alloc_regions[allocate_index] = Some(AllocateRegion {
                flags_set: pages_attr,
                flags_clear: alloc_regions_flags_clear,
                start_index_page: start_mem_page,
                pages: calc_alloc_pages(aligned_app_mem_size),
            });

            // Map only the initial app-owned pages. The range is exclusive,
            // since `last_page` is one past the final app page; an inclusive
            // range would also expose the first kernel-owned page.
            let last_page = start_mem_page + calc_alloc_pages(aligned_app_mem_size);
            let mut sram_page_table = self.pt.borrow_mut();
            for page_index in start_mem_page..last_page {
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), pages_attr);
                config.ram_regions += 1;
            }

            config
                .page_information
                .set_ram(start_mem_page, config.ram_regions);
            config.page_information.last_page_owned = last_page_app_mem;
            config.page_information.kernel_first_page = last_page_app_mem - kernel_alloc_pages;
            config.page_information.app_ram_region = allocate_index;
            Some((aligned_address_app as *const u8, total_memory_size))
        }
    }

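    // Tracks `brk`-style moves of the app/kernel memory boundary, updating
    // the recorded app RAM region without remapping pages here; the new
    // flags take effect on the next configure_mpu().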
    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        _permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        let page_in_app_break = calc_page_index(app_memory_break as usize);

        let page_in_kernel_break = calc_page_index(kernel_memory_break as usize);

        let last_page_currently =
            config.page_information.get_ram() + config.page_information.ram_pages - 1;
        let num_of_ram_pages = page_in_app_break - config.page_information.get_ram();

        // The app break may never overtake the kernel break, and the kernel
        // break must stay within the pages owned by this process.
        if (app_memory_break as usize) > (kernel_memory_break as usize)
            || (page_in_app_break >= page_in_kernel_break)
            || page_in_kernel_break > config.page_information.last_page_owned
        {
            return Err(());
        }

        if last_page_currently != page_in_app_break
            || num_of_ram_pages != config.page_information.ram_pages
        {
            if let Some(r) = config.page_information.alloc_regions
                [config.page_information.app_ram_region]
                .as_mut()
            {
                r.pages = num_of_ram_pages;
            }
            config.page_information.ram_pages = num_of_ram_pages;
            config.page_information.kernel_first_page = page_in_kernel_break;
        }

        Ok(())
    }

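    // Context-switch hook: revert the outgoing configuration's pages to
    // their "not running" flags, then apply the incoming configuration's
    // "running" flags, flushing the TLB after each pass.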
    fn configure_mpu(&self, config: &Self::MpuConfig) {
        self.config_pages.map(|current_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in current_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    let last_region_page = init_region_page + r.pages;
                    // Exclusive range: `last_region_page` is one past the
                    // final page of the region.
                    for page_index in init_region_page..last_region_page {
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_clear);
                    }
                }

                tlb::flush_all();
            }
        });
        self.config_pages.put(config.page_information);

        self.config_pages.map(|app_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in app_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    let last_region_page = init_region_page + r.pages;
                    for page_index in init_region_page..last_region_page {
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_set);
                    }
                }
                tlb::flush_all();
            }
        });
    }
}