x86/mpu.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2024.

// TODO: this module needs some polish

use crate::registers::bits32::paging::{
    PAddr, PDEntry, PTEntry, PTFlags, PD, PDFLAGS, PT, PTFLAGS,
};
use crate::registers::controlregs::{self, CR0, CR4};
use crate::registers::tlb;
use core::{cmp, fmt, mem};
use kernel::platform::mpu::{Permissions, Region, MPU};
use kernel::utilities::cells::MapCell;
use tock_registers::LocalRegisterCopy;

use core::cell::RefCell;

//
// Information about the page table and virtual addresses can be found here:
// https://wiki.osdev.org/Paging
//
const MAX_PTE_ENTRY: usize = 1024;
const PAGE_BITS_4K: usize = 12;
const PAGE_SIZE_4K: usize = 1 << PAGE_BITS_4K;
const PAGE_SIZE_4M: usize = 0x400000;
const MAX_REGIONS: usize = 8;
const PAGE_TABLE_MASK: usize = MAX_PTE_ENTRY - 1;
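
// As a worked example of the layout these constants describe (illustrative
// values only): with 4 KiB pages, a 32-bit virtual address splits into a
// 10-bit page directory index, a 10-bit page table index, and a 12-bit page
// offset. Address 0x0040_1123 therefore has page offset 0x123, page table
// index 0x001, page directory index 0x001, and lies on page
// 0x0040_1123 / PAGE_SIZE_4K = 0x401.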

#[derive(Copy, Clone)]
struct AllocateRegion {
    start_index_page: usize,
    pages: usize,
    flags_set: PTFlags,
    flags_clear: PTFlags,
}

#[derive(Copy, Clone)]
struct PageTableConfig {
    start_ram_section: usize,
    ram_pages: usize,
    start_app_section: usize,
    app_pages: usize,
    last_page_owned: usize,
    kernel_first_page: usize,
    app_ram_region: usize,
    alloc_regions: [Option<AllocateRegion>; MAX_REGIONS],
}

impl PageTableConfig {
    pub fn new() -> Self {
        Self {
            start_ram_section: 0,
            ram_pages: 0,
            start_app_section: 0,
            app_pages: 0,
            last_page_owned: 0,
            kernel_first_page: 0,
            app_ram_region: 0,
            alloc_regions: [None; MAX_REGIONS],
        }
    }
    pub fn set_app(&mut self, start: usize, sections: usize) {
        self.start_app_section = start;
        self.app_pages = sections;
    }
    pub fn set_ram(&mut self, start: usize, sections: usize) {
        self.start_ram_section = start;
        self.ram_pages = sections;
    }
    pub fn get_ram(&self) -> usize {
        self.start_ram_section
    }
}

pub struct MemoryProtectionConfig {
    num_regions: usize,
    ram_regions: usize,
    page_information: PageTableConfig,
}

impl Default for MemoryProtectionConfig {
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryProtectionConfig {
    pub fn new() -> Self {
        Self {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        }
    }
}

impl fmt::Display for MemoryProtectionConfig {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f)?;
        writeln!(f, " Paging Configuration:")?;

        writeln!(
            f,
            "  Total regions: {:10}   RAM regions: {:10}",
            self.num_regions, self.ram_regions
        )?;

        let flash_start = self.page_information.start_app_section * PAGE_SIZE_4K;
        let flash_length = self.page_information.app_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            "  Flash start:   {:#010x}   Length:      {:#10x}",
            flash_start, flash_length
        )?;

        let ram_start = self.page_information.start_ram_section * PAGE_SIZE_4K;
        let ram_length = self.page_information.ram_pages * PAGE_SIZE_4K;
        writeln!(
            f,
            "  RAM start:     {:#010x}   Length:      {:#10x}",
            ram_start, ram_length
        )?;

        let kernel_start = self.page_information.kernel_first_page * PAGE_SIZE_4K;
        let kernel_length = (self.page_information.last_page_owned + 1
            - self.page_information.kernel_first_page)
            * PAGE_SIZE_4K;
        writeln!(
            f,
            "  Kernel start:  {:#010x}   Length:      {:#10x}",
            kernel_start, kernel_length
        )?;
        writeln!(f)?;

        Ok(())
    }
}

pub struct PagingMPU<'a> {
    num_regions: usize,
    config_pages: MapCell<PageTableConfig>,
    page_dir_paddr: usize,
    page_table_paddr: usize,
    pd: RefCell<&'a mut PD>,
    pt: RefCell<&'a mut PT>,
}

fn calc_page_index(memory_address: usize) -> usize {
    memory_address / PAGE_SIZE_4K
}

// Calculates the number of pages required for `memory_size`, rounding up to
// the next page boundary.
fn calc_alloc_pages(memory_size: usize) -> usize {
    memory_size.next_multiple_of(PAGE_SIZE_4K) / PAGE_SIZE_4K
}
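
// A quick worked example of the rounding above (illustrative values only):
// calc_alloc_pages(1) == 1, calc_alloc_pages(PAGE_SIZE_4K) == 1, and
// calc_alloc_pages(PAGE_SIZE_4K + 1) == 2.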

impl<'a> PagingMPU<'a> {
    pub unsafe fn new(
        page_dir: &'a mut PD,
        page_dir_paddr: usize,
        page_table: &'a mut PT,
        page_table_paddr: usize,
    ) -> Self {
        let page_dir = RefCell::new(page_dir);
        let page_table = RefCell::new(page_table);

        Self {
            num_regions: 0,
            config_pages: MapCell::empty(),
            page_dir_paddr,
            page_table_paddr,
            pd: page_dir,
            pt: page_table,
        }
    }

    ///
    /// Basic iterator to walk through all page table entries
    ///
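    /// A minimal usage sketch (illustrative only; assumes a `paging_mpu:
    /// PagingMPU` value has already been constructed):
    ///
    /// ```ignore
    /// let mut mapped_bytes = 0;
    /// unsafe {
    ///     paging_mpu.iterate_pt(|_index, _entry| {
    ///         // Each page table entry covers one 4 KiB page.
    ///         mapped_bytes += PAGE_SIZE_4K;
    ///     });
    /// }
    /// ```
    ///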
    pub unsafe fn iterate_pt<C>(&self, mut closure: C)
    where
        C: FnMut(usize, &mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        for (n, entry) in page_table.iter_mut().enumerate() {
            closure(n, entry);
        }
    }

    ///
    /// Get a page table entry from a virtual address
    ///
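    /// The page table index is taken from bits 12..22 of the virtual address
    /// (`(virtual_addr >> PAGE_BITS_4K) & PAGE_TABLE_MASK`). A usage sketch
    /// (illustrative only):
    ///
    /// ```ignore
    /// // Inspect the entry that maps virtual address 0x3000.
    /// paging_mpu.pt_from_addr(
    ///     |entry| {
    ///         let _paddr = entry.address();
    ///         // ... inspect or modify the entry here ...
    ///     },
    ///     0x3000,
    /// );
    /// ```
    ///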
    pub fn pt_from_addr<C>(&self, mut closure: C, virtual_addr: usize)
    where
        C: FnMut(&mut PTEntry),
    {
        let mut page_table = self.pt.borrow_mut();
        let mut page_index = virtual_addr >> PAGE_BITS_4K;
        page_index &= PAGE_TABLE_MASK;

        closure(&mut page_table[page_index]);
    }

    ///
    /// Initializes the page directory and page table.
    ///
    pub unsafe fn initialize_page_tables(&self) {
        let mut page_directory = self.pd.borrow_mut();

        // Fill the page directory with 4 MiB entries that identity-map (1:1)
        // the entire 32-bit address space, 0x0000_0000 through 0xFFFF_FFFF, so
        // that all of it is accessible to the kernel.
        for (n, entry) in page_directory.iter_mut().enumerate() {
            let mut entry_flags = LocalRegisterCopy::new(0);
            entry_flags.write(PDFLAGS::PS::SET + PDFLAGS::RW::SET + PDFLAGS::P::SET);
            // Each entry maps a 4 MiB region as present and writable.
            *entry = PDEntry::new(PAddr::from(PAGE_SIZE_4M * n), entry_flags);
        }

        // The first Page Directory Entry instead maps 0x0000_0000..0x0040_0000
        // through the 4 KiB page table. It must be marked User Accessible so
        // that individual page table entries can later be opened to userspace.
        let mut page_directory_flags = LocalRegisterCopy::new(0);
        page_directory_flags.write(PDFLAGS::P::SET + PDFLAGS::RW::SET + PDFLAGS::US::SET);
        page_directory[0] = PDEntry::new(PAddr::from(self.page_table_paddr), page_directory_flags);

        // Map the first 4 MiB of memory with 4 KiB entries.
        let mut page_table = self.pt.borrow_mut();
        let mut page_table_flags = LocalRegisterCopy::new(0);
        page_table_flags.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
        for (n, entry) in page_table.iter_mut().enumerate() {
            *entry = PTEntry::new(PAddr::from(PAGE_SIZE_4K * n), page_table_flags);
        }
    }

    ///
    /// Performs basic 32-bit x86 paging enablement.
    ///
    /// This function enables 4 MiB pages (PSE in CR4), loads the physical
    /// address of the page directory into CR3, and then enables paging in CR0.
    ///
    unsafe fn enable_paging(&self) {
        // In order to enable 4 MiB pages, make sure PSE is enabled in CR4.
        let mut cr4_value = unsafe { controlregs::cr4() };
        if !cr4_value.is_set(CR4::CR4_ENABLE_PSE) {
            cr4_value.modify(CR4::CR4_ENABLE_PSE::SET);
            unsafe {
                controlregs::cr4_write(cr4_value);
            }
        }

        unsafe {
            // With the page directory and page table populated, load the
            // directory's physical address into CR3.
            controlregs::cr3_write(self.page_dir_paddr as u64);

            // Finally, enable paging by setting the corresponding bit in CR0.
            let mut cr0_value = controlregs::cr0();
            cr0_value.modify(CR0::CR0_ENABLE_PAGING::SET);
            controlregs::cr0_write(cr0_value);
        }
    }

    ///
    /// General init function.
    ///
    /// This function sets up the page directory and page table and then
    /// enables paging.
    pub fn init(&self) {
        unsafe {
            self.initialize_page_tables();
            self.enable_paging();
        }
    }
}
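
// A minimal construction sketch (hypothetical board setup code; the aligned
// page directory / page table statics and their physical addresses are
// assumptions of this example, not something this module provides):
//
//     let paging_mpu = unsafe {
//         PagingMPU::new(&mut PAGE_DIR, PAGE_DIR_PADDR, &mut PAGE_TABLE, PAGE_TABLE_PADDR)
//     };
//     // Build the identity mapping and turn on paging.
//     paging_mpu.init();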

impl fmt::Display for PagingMPU<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Num_regions: {:?}, ...", self.num_regions,)
    }
}

// `MPU` is an unsafe trait, and with this implementation we guarantee
// that we adhere to the semantics documented on that trait and its
// associated types and methods.
unsafe impl MPU for PagingMPU<'_> {
    type MpuConfig = MemoryProtectionConfig;

    fn new_config(&self) -> Option<Self::MpuConfig> {
        Some(MemoryProtectionConfig {
            num_regions: 0,
            ram_regions: 0,
            page_information: PageTableConfig::new(),
        })
    }

    fn reset_config(&self, config: &mut Self::MpuConfig) {
        config.num_regions = 0;
        config.ram_regions = 0;
        config.page_information = PageTableConfig::new();
    }

    // Once paging is enabled, it is enabled for both Ring 0 and Ring 3.
    fn enable_app_mpu(&self) {}

    // Paging stays enabled for both Ring 0 and Ring 3.
    unsafe fn disable_app_mpu(&self) {}

    /// Returns the maximum number of regions supported by the MPU.
    fn number_total_regions(&self) -> usize {
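        // For the 32-bit, non-PAE page table used here this should evaluate
        // to 1024 entries (MAX_PTE_ENTRY).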
        mem::size_of::<PT>() / mem::size_of::<PTEntry>()
    }

    fn allocate_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<Region> {
        // Align the start of the unallocated memory up to a 4 KiB page boundary.
        let aligned_address_start: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let page_index: usize = calc_page_index(aligned_address_start);

        let pages_alloc_requested: usize = calc_alloc_pages(min_region_size);

        let total_page_aligned_size: usize = pages_alloc_requested * PAGE_SIZE_4K;
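        // Worked example (illustrative values only): a min_region_size of
        // 6000 bytes requests calc_alloc_pages(6000) == 2 pages, so
        // total_page_aligned_size == 8192 bytes.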

        if aligned_address_start + total_page_aligned_size
            > unallocated_memory_start as usize + unallocated_memory_size
        {
            return None;
        }

        // Check whether this is an exact duplicate of an existing region allocation.
        for r in config.page_information.alloc_regions.iter().flatten() {
            if r.start_index_page == page_index && r.pages == pages_alloc_requested {
                return Some(Region::new(
                    aligned_address_start as *const u8,
                    total_page_aligned_size,
                ));
            }
        }

        // Execute protection requires PAE to be enabled and support from the
        // CPU; once that is available, the page entries here can use the NX bit.

        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ReadOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
        }

        // When allocating a region we also record the flags to restore it to
        // later: if it is a region shared with the kernel in RAM it must stay
        // writable by the kernel, anything else only needs to remain present.
        let mut pages_clear = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteOnly => pages_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET),
            _ => pages_clear.write(PTFLAGS::P::SET),
        }

        // Make sure the requested pages fit within the page table.
        if page_index > MAX_PTE_ENTRY || page_index + pages_alloc_requested > MAX_PTE_ENTRY {
            return None;
        }

        // Check that the start and end fall within the unallocated memory limits.
        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_start + total_page_aligned_size - 1;
        if calc_page_index(end_of_allocated_memory) > calc_page_index(end_of_unallocated_memory) {
            None
        } else {
            // Find the next free (unused) region slot.
            let index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none());

            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = Some(AllocateRegion {
                        flags_set: pages_attr,
                        flags_clear: pages_clear,
                        start_index_page: page_index,
                        pages: pages_alloc_requested,
                    });
                }
                None => return None,
            }

            let last_page = page_index + pages_alloc_requested;

            let mut sram_page_table = self.pt.borrow_mut();

            for current_page in page_index..=last_page {
                sram_page_table[current_page] =
                    PTEntry::new(sram_page_table[current_page].address(), pages_attr);
                config.num_regions += 1;
            }

            config
                .page_information
                .set_app(page_index, config.num_regions);

            Some(Region::new(
                aligned_address_start as *const u8,
                total_page_aligned_size,
            ))
        }
    }

    fn remove_memory_region(&self, region: Region, config: &mut Self::MpuConfig) -> Result<(), ()> {
        unsafe {
            let start_page = calc_page_index(region.start_address() as usize);
            let last_page = start_page + calc_alloc_pages(region.size());

            // Find the allocated region that matches this request.
            let index = config.page_information.alloc_regions.iter().position(|r| {
                if let Some(r) = r {
                    if r.start_index_page == start_page && r.pages == last_page - start_page {
                        return true;
                    }
                }
                false
            });

            // If the region is not found return an error; otherwise remove it.
            match index {
                Some(i) => {
                    config.page_information.alloc_regions[i] = None;
                }
                None => return Err(()),
            }

            // Update the page table to remove the region.
            let mut sram_page_table = self.pt.borrow_mut();
            for page_index in start_page..=last_page {
                // Keep the same address but reset the flags to present-only.
                let mut sram_page_table_flags = LocalRegisterCopy::new(0);
                sram_page_table_flags.write(PTFLAGS::P::SET);
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), sram_page_table_flags);

                // Invalidate the TLB entry for this virtual address.
                let inv_page = page_index * PAGE_SIZE_4K;
                tlb::flush(inv_page);
                config.num_regions -= 1;
            }
        }
        Ok(())
    }

    fn allocate_app_memory_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_memory_size: usize,
        initial_app_memory_size: usize,
        initial_kernel_memory_size: usize,
        permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Option<(*const u8, usize)> {
        // Allocate the app and kernel memory as one contiguous block: app
        // memory at the start, with the kernel-owned memory directly after it.

        let aligned_address_app: usize =
            (unallocated_memory_start as usize).next_multiple_of(PAGE_SIZE_4K);
        let last_unallocated_memory: usize =
            (unallocated_memory_start as usize) + unallocated_memory_size;
        let start_mem_page: usize = calc_page_index(aligned_address_app);

        let last_page_app_mem: usize = calc_page_index(last_unallocated_memory);

        // For x86 the minimum protection granularity is a 4 KiB page.
        let aligned_app_mem_size: usize = initial_app_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_kernel_mem_size: usize =
            initial_kernel_memory_size.next_multiple_of(PAGE_SIZE_4K);
        let aligned_min_mem_size: usize = min_memory_size.next_multiple_of(PAGE_SIZE_4K);

        let mut pages_attr = LocalRegisterCopy::new(0);
        match permissions {
            Permissions::ReadWriteExecute => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadWriteOnly => {
                pages_attr.write(PTFLAGS::P::SET + PTFLAGS::RW::SET + PTFLAGS::US::SET)
            }
            Permissions::ReadExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ReadOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
            Permissions::ExecuteOnly => pages_attr.write(PTFLAGS::P::SET + PTFLAGS::US::SET),
        }

        // Compute the total required size; at this point all sizes are page-aligned.

        let total_memory_size = cmp::max(
            aligned_min_mem_size + aligned_kernel_mem_size,
            aligned_app_mem_size + aligned_kernel_mem_size,
        );
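        // Worked example (illustrative values only): with
        // initial_app_memory_size = 5000, initial_kernel_memory_size = 1000,
        // and min_memory_size = 8000, the aligned sizes are 8192, 4096, and
        // 8192 bytes, so total_memory_size = max(8192 + 4096, 8192 + 4096) =
        // 12288 bytes (three pages).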
        let pages_alloc_requested: usize = calc_alloc_pages(total_memory_size);
        let kernel_alloc_pages: usize = calc_alloc_pages(aligned_kernel_mem_size);

        // Make sure the first and last pages fit within the page table.
        if start_mem_page > MAX_PTE_ENTRY || start_mem_page + pages_alloc_requested > MAX_PTE_ENTRY
        {
            return None;
        }
        // Check the boundary against the end of the unallocated memory.
        let end_of_unallocated_memory: usize =
            unallocated_memory_start as usize + unallocated_memory_size;
        let end_of_allocated_memory: usize = aligned_address_app + total_memory_size;
        if end_of_allocated_memory > end_of_unallocated_memory {
            None
        } else {
            let allocate_index = config
                .page_information
                .alloc_regions
                .iter_mut()
                .position(|r| r.is_none())?;

            let mut alloc_regions_flags_clear = LocalRegisterCopy::new(0);
            alloc_regions_flags_clear.write(PTFLAGS::P::SET + PTFLAGS::RW::SET);
            config.page_information.alloc_regions[allocate_index] = Some(AllocateRegion {
                flags_set: pages_attr,
                flags_clear: alloc_regions_flags_clear,
                start_index_page: start_mem_page,
                pages: calc_alloc_pages(aligned_app_mem_size),
            });

            let last_page = start_mem_page + calc_alloc_pages(aligned_app_mem_size);
            let mut sram_page_table = self.pt.borrow_mut();
            for page_index in start_mem_page..=last_page {
                // Keep the same address but apply the app's flags.
                sram_page_table[page_index] =
                    PTEntry::new(sram_page_table[page_index].address(), pages_attr);
                config.ram_regions += 1;
            }

            config
                .page_information
                .set_ram(start_mem_page, config.ram_regions);
            config.page_information.last_page_owned = last_page_app_mem;
            config.page_information.kernel_first_page = last_page_app_mem - kernel_alloc_pages;
            config.page_information.app_ram_region = allocate_index;
            Some((aligned_address_app as *const u8, total_memory_size))
        }
    }

    fn update_app_memory_region(
        &self,
        app_memory_break: *const u8,
        kernel_memory_break: *const u8,
        _permissions: Permissions,
        config: &mut Self::MpuConfig,
    ) -> Result<(), ()> {
        // Because x86 protection granularity is a 4 KiB page, app memory and
        // kernel memory cannot share a page: check whether the new app break
        // encroaches on a kernel-owned page. Depending on the app's memory
        // grants, some memory on the kernel-assigned page may go unused.
        let page_in_app_break = calc_page_index(app_memory_break as usize);

        let page_in_kernel_break = calc_page_index(kernel_memory_break as usize);

        // The last page currently owned must include the RAM itself so we can
        // correctly detect whether the break has moved past the last page the
        // app currently owns.
        let last_page_currently =
            config.page_information.get_ram() + config.page_information.ram_pages - 1;
        let num_of_ram_pages = page_in_app_break - config.page_information.get_ram();

        // Check the boundaries: the app break must not pass the kernel break,
        // and the kernel break must not go beyond the last page assigned to
        // this process.

        if (app_memory_break as usize) > (kernel_memory_break as usize)
            || (page_in_app_break >= page_in_kernel_break)
            || page_in_kernel_break > config.page_information.last_page_owned
        {
            return Err(());
        }
        // Now check whether anything changed that requires a reconfiguration.
        if last_page_currently != page_in_app_break
            || num_of_ram_pages != config.page_information.ram_pages
        {
            if let Some(r) = config.page_information.alloc_regions
                [config.page_information.app_ram_region]
                .as_mut()
            {
                r.pages = num_of_ram_pages;
            }
            config.page_information.ram_pages = num_of_ram_pages;
            config.page_information.kernel_first_page = page_in_kernel_break;
        }

        Ok(())
    }

    unsafe fn configure_mpu(&self, config: &Self::MpuConfig) {
        self.config_pages.map(|current_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in current_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    let last_region_page = init_region_page + r.pages;
                    for page_index in init_region_page..=last_region_page {
                        // Keep the same address but revert to the region's clear flags.
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_clear);
                    }
                }

                // Flush the TLB in a single operation so all entries are refreshed at once.
                tlb::flush_all();
            }
        });
        // Now store the incoming config as the one currently in use.
        self.config_pages.put(config.page_information);

        self.config_pages.map(|app_config| {
            unsafe {
                let mut sram_page_table = self.pt.borrow_mut();
                for r in app_config.alloc_regions.iter().flatten() {
                    let init_region_page = r.start_index_page;
                    let last_region_page = init_region_page + r.pages;
                    for page_index in init_region_page..=last_region_page {
                        // Keep the same address but apply the region's set flags.
                        sram_page_table[page_index] =
                            PTEntry::new(sram_page_table[page_index].address(), r.flags_set);
                    }
                }
                // Flush the TLB in a single operation so all entries are refreshed at once.
                tlb::flush_all();
            }
        });
    }
}