kernel/process_standard.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Tock default Process implementation.
6//!
7//! `ProcessStandard` is an implementation for a userspace process running on
8//! the Tock kernel.
9
10use core::cell::Cell;
11use core::cmp;
12use core::fmt::Write;
13use core::mem::MaybeUninit;
14use core::num::NonZeroU32;
15use core::ptr::NonNull;
16use core::{mem, ptr, slice, str};
17
18use crate::collections::queue::Queue;
19use crate::collections::ring_buffer::RingBuffer;
20use crate::config;
21use crate::debug;
22use crate::errorcode::ErrorCode;
23use crate::kernel::Kernel;
24use crate::platform::chip::Chip;
25use crate::platform::mpu::{self, MPU};
26use crate::process::ProcessBinary;
27use crate::process::{BinaryVersion, ReturnArguments};
28use crate::process::{Error, FunctionCall, FunctionCallSource, Process, Task};
29use crate::process::{FaultAction, ProcessCustomGrantIdentifier, ProcessId};
30use crate::process::{ProcessAddresses, ProcessSizes, ShortId};
31use crate::process::{State, StoppedState};
32use crate::process_checker::AcceptedCredential;
33use crate::process_loading::ProcessLoadError;
34use crate::process_policies::ProcessFaultPolicy;
35use crate::process_policies::ProcessStandardStoragePermissionsPolicy;
36use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer};
37use crate::storage_permissions::StoragePermissions;
38use crate::syscall::{self, Syscall, SyscallReturn, UserspaceKernelBoundary};
39use crate::upcall::UpcallId;
40use crate::utilities::capability_ptr::{CapabilityPtr, CapabilityPtrPermissions};
41use crate::utilities::cells::{MapCell, NumericCellExt, OptionalCell};
42
43use tock_tbf::types::CommandPermissions;
44
/// Gets a mutable (unique) reference to the contained value.
///
/// TODO: this is copied from the standard library, where it is available under
/// the `maybe_uninit_slice` nightly feature. Remove and switch to the core
/// library variant once that is stable.
///
/// # Safety
///
/// Calling this when the content is not yet fully initialized causes undefined
/// behavior: it is up to the caller to guarantee that every `MaybeUninit<T>` in
/// the slice really is in an initialized state. For instance,
/// `.assume_init_mut()` cannot be used to initialize a `MaybeUninit` slice.
#[inline(always)]
const unsafe fn maybe_uninit_slice_assume_init_mut<T>(src: &mut [MaybeUninit<T>]) -> &mut [T] {
    // Implicitly coerce the unique reference to a raw pointer first, then
    // transmute the pointee type via a pointer cast.
    let uninit_ptr: *mut [MaybeUninit<T>] = src;
    // SAFETY: `MaybeUninit<T>` is layout-compatible with `T`, and the caller
    // guarantees every element is initialized. The pointer originates from a
    // unique, valid mutable reference, so the resulting reference is valid
    // for both reads and writes and aliases nothing else.
    unsafe { &mut *(uninit_ptr as *mut [T]) }
}
65}
66
/// Divides one mutable raw slice into two at an index.
///
/// This method implementation is copied from the standard library, where it is
/// available with `raw_slice_split` nightly feature. TODO: switch to the
/// standard library function once that is stable.
///
/// The first will contain all indices from `[0, mid)` (excluding the index
/// `mid` itself) and the second will contain all indices from `[mid, len)`
/// (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Safety
///
/// `mid` must be [in-bounds] of the underlying [allocation]. Which means
/// `self` must be dereferenceable and span a single allocation that is at
/// least `mid * size_of::<T>()` bytes long. Not upholding these requirements
/// is *[undefined behavior]* even if the resulting pointers are not used.
///
/// Since `len` being in-bounds is not a safety invariant of `*mut [T]` the
/// safety requirements of this method are the same as for
/// [`split_at_mut_unchecked`]. The explicit bounds check is only as useful as
/// `len` is correct.
///
/// [`split_at_mut_unchecked`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.split_at_mut_unchecked
/// [in-bounds]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.add-1
/// [allocation]: https://doc.rust-lang.org/stable/std/ptr/index.html#allocation
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
unsafe fn raw_slice_split_at_mut<T>(slice: *mut [T], mid: usize) -> (*mut [T], *mut [T]) {
    let len = slice.len();
    // Explicit bounds check, mirroring `split_at_mut`.
    assert!(mid <= len);

    let base = slice.cast::<T>();
    // SAFETY: the caller must pass a valid pointer, and the assertion above
    // ensures `mid` is in-bounds.
    let rest = unsafe { base.add(mid) };

    let head = core::ptr::slice_from_raw_parts_mut(base, mid);
    let tail = core::ptr::slice_from_raw_parts_mut(rest, len - mid);
    (head, tail)
}
109}
110
/// Interface supported by [`ProcessStandard`] for recording debug information.
///
/// This trait provides flexibility to users of [`ProcessStandard`] to determine
/// how debugging information should be recorded, or if debugging information
/// should be recorded at all.
///
/// Platforms that want to only maintain certain debugging information can
/// implement only part of this trait.
///
/// Tock provides a default implementation of this trait on the `()` type.
/// Kernels that wish to use [`ProcessStandard`] but do not need process-level
/// debugging information can use `()` as the `ProcessStandardDebug` type.
pub trait ProcessStandardDebug: Default {
    /// Record the address in flash the process expects to start at.
    fn set_fixed_address_flash(&self, address: u32);
    /// Get the address in flash the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_flash(&self) -> Option<u32>;
    /// Record the address in RAM the process expects to start at.
    fn set_fixed_address_ram(&self, address: u32);
    /// Get the address in RAM the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_ram(&self) -> Option<u32>;
    /// Record the address where the process placed its heap.
    fn set_app_heap_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its heap, if it was recorded.
    fn get_app_heap_start_pointer(&self) -> Option<*const u8>;
    /// Record the address where the process placed its stack.
    fn set_app_stack_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its stack, if it was recorded.
    fn get_app_stack_start_pointer(&self) -> Option<*const u8>;
    /// Update the lowest address that the process's stack has reached.
    fn set_app_stack_min_pointer(&self, ptr: *const u8);
    /// Get the lowest address of the process's stack, if it was recorded.
    fn get_app_stack_min_pointer(&self) -> Option<*const u8>;
    /// Provide the current address of the bottom of the stack and record the
    /// address if it is the lowest address that the process's stack has
    /// reached.
    fn set_new_app_stack_min_pointer(&self, ptr: *const u8);

    /// Record the most recent system call the process called.
    fn set_last_syscall(&self, syscall: Syscall);
    /// Get the most recent system call the process called, if it was recorded.
    fn get_last_syscall(&self) -> Option<Syscall>;
    /// Clear any record of the most recent system call the process called.
    fn reset_last_syscall(&self);

    /// Increase the recorded count of the number of system calls the process
    /// has called.
    fn increment_syscall_count(&self);
    /// Get the recorded count of the number of system calls the process has
    /// called.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_syscall_count()`] is never called.
    fn get_syscall_count(&self) -> usize;
    /// Reset the recorded count of the number of system calls called by the app
    /// to 0.
    fn reset_syscall_count(&self);

    /// Increase the recorded count of the number of upcalls that have been
    /// dropped for the process.
    fn increment_dropped_upcall_count(&self);
    /// Get the recorded count of the number of upcalls that have been dropped
    /// for the process.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_dropped_upcall_count()`] is never
    /// called.
    fn get_dropped_upcall_count(&self) -> usize;
    /// Reset the recorded count of the number of upcalls that have been dropped
    /// for the process to 0.
    fn reset_dropped_upcall_count(&self);

    /// Increase the recorded count of the number of times the process has
    /// exceeded its timeslice.
    fn increment_timeslice_expiration_count(&self);
    /// Get the recorded count of the number of times the process has exceeded
    /// its timeslice.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_timeslice_expiration_count()`] is
    /// never called.
    fn get_timeslice_expiration_count(&self) -> usize;
    /// Reset the recorded count of the number of times the process has exceeded
    /// its timeslice to 0.
    fn reset_timeslice_expiration_count(&self);
}
198}
199
/// A debugging implementation for [`ProcessStandard`] that records the full
/// debugging state.
pub struct ProcessStandardDebugFull {
    /// Inner field for the debug state that is in a [`MapCell`] to provide
    /// mutable access behind the trait's `&self` methods.
    debug: MapCell<ProcessStandardDebugFullInner>,
}
207
/// Struct for debugging [`ProcessStandard`] processes that records the full set
/// of debugging information.
///
/// These pointers and counters are not strictly required for kernel operation,
/// but provide helpful information when an app crashes.
///
/// All fields start at their `Default` values (`None` / 0) and are only
/// populated through the [`ProcessStandardDebug`] trait methods.
#[derive(Default)]
struct ProcessStandardDebugFullInner {
    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in flash. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_flash: Option<u32>,

    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in RAM. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_ram: Option<u32>,

    /// Where the process has started its heap in RAM.
    app_heap_start_pointer: Option<*const u8>,

    /// Where the start of the stack is for the process. If the kernel does the
    /// PIC setup for this app then we know this, otherwise we need the app to
    /// tell us where it put its stack.
    app_stack_start_pointer: Option<*const u8>,

    /// How low have we ever seen the stack pointer.
    app_stack_min_pointer: Option<*const u8>,

    /// How many syscalls have occurred since the process started.
    syscall_count: usize,

    /// What was the most recent syscall.
    last_syscall: Option<Syscall>,

    /// How many upcalls were dropped because the queue was insufficiently
    /// long.
    dropped_upcall_count: usize,

    /// How many times this process has been paused because it exceeded its
    /// timeslice.
    timeslice_expiration_count: usize,
}
250
251impl ProcessStandardDebug for ProcessStandardDebugFull {
252 fn set_fixed_address_flash(&self, address: u32) {
253 self.debug.map(|d| d.fixed_address_flash = Some(address));
254 }
255 fn get_fixed_address_flash(&self) -> Option<u32> {
256 self.debug.map_or(None, |d| d.fixed_address_flash)
257 }
258 fn set_fixed_address_ram(&self, address: u32) {
259 self.debug.map(|d| d.fixed_address_ram = Some(address));
260 }
261 fn get_fixed_address_ram(&self) -> Option<u32> {
262 self.debug.map_or(None, |d| d.fixed_address_ram)
263 }
264 fn set_app_heap_start_pointer(&self, ptr: *const u8) {
265 self.debug.map(|d| d.app_heap_start_pointer = Some(ptr));
266 }
267 fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
268 self.debug.map_or(None, |d| d.app_heap_start_pointer)
269 }
270 fn set_app_stack_start_pointer(&self, ptr: *const u8) {
271 self.debug.map(|d| d.app_stack_start_pointer = Some(ptr));
272 }
273 fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
274 self.debug.map_or(None, |d| d.app_stack_start_pointer)
275 }
276 fn set_app_stack_min_pointer(&self, ptr: *const u8) {
277 self.debug.map(|d| d.app_stack_min_pointer = Some(ptr));
278 }
279 fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
280 self.debug.map_or(None, |d| d.app_stack_min_pointer)
281 }
282 fn set_new_app_stack_min_pointer(&self, ptr: *const u8) {
283 self.debug.map(|d| {
284 match d.app_stack_min_pointer {
285 None => d.app_stack_min_pointer = Some(ptr),
286 Some(asmp) => {
287 // Update max stack depth if needed.
288 if ptr < asmp {
289 d.app_stack_min_pointer = Some(ptr);
290 }
291 }
292 }
293 });
294 }
295
296 fn set_last_syscall(&self, syscall: Syscall) {
297 self.debug.map(|d| d.last_syscall = Some(syscall));
298 }
299 fn get_last_syscall(&self) -> Option<Syscall> {
300 self.debug.map_or(None, |d| d.last_syscall)
301 }
302 fn reset_last_syscall(&self) {
303 self.debug.map(|d| d.last_syscall = None);
304 }
305
306 fn increment_syscall_count(&self) {
307 self.debug.map(|d| d.syscall_count += 1);
308 }
309 fn get_syscall_count(&self) -> usize {
310 self.debug.map_or(0, |d| d.syscall_count)
311 }
312 fn reset_syscall_count(&self) {
313 self.debug.map(|d| d.syscall_count = 0);
314 }
315
316 fn increment_dropped_upcall_count(&self) {
317 self.debug.map(|d| d.dropped_upcall_count += 1);
318 }
319 fn get_dropped_upcall_count(&self) -> usize {
320 self.debug.map_or(0, |d| d.dropped_upcall_count)
321 }
322 fn reset_dropped_upcall_count(&self) {
323 self.debug.map(|d| d.dropped_upcall_count = 0);
324 }
325
326 fn increment_timeslice_expiration_count(&self) {
327 self.debug.map(|d| d.timeslice_expiration_count += 1);
328 }
329 fn get_timeslice_expiration_count(&self) -> usize {
330 self.debug.map_or(0, |d| d.timeslice_expiration_count)
331 }
332 fn reset_timeslice_expiration_count(&self) {
333 self.debug.map(|d| d.timeslice_expiration_count = 0);
334 }
335}
336
337impl Default for ProcessStandardDebugFull {
338 fn default() -> Self {
339 Self {
340 debug: MapCell::new(ProcessStandardDebugFullInner::default()),
341 }
342 }
343}
344
345impl ProcessStandardDebug for () {
346 fn set_fixed_address_flash(&self, _address: u32) {}
347 fn get_fixed_address_flash(&self) -> Option<u32> {
348 None
349 }
350 fn set_fixed_address_ram(&self, _address: u32) {}
351 fn get_fixed_address_ram(&self) -> Option<u32> {
352 None
353 }
354 fn set_app_heap_start_pointer(&self, _ptr: *const u8) {}
355 fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
356 None
357 }
358 fn set_app_stack_start_pointer(&self, _ptr: *const u8) {}
359 fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
360 None
361 }
362 fn set_app_stack_min_pointer(&self, _ptr: *const u8) {}
363 fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
364 None
365 }
366 fn set_new_app_stack_min_pointer(&self, _ptr: *const u8) {}
367
368 fn set_last_syscall(&self, _syscall: Syscall) {}
369 fn get_last_syscall(&self) -> Option<Syscall> {
370 None
371 }
372 fn reset_last_syscall(&self) {}
373
374 fn increment_syscall_count(&self) {}
375 fn get_syscall_count(&self) -> usize {
376 0
377 }
378 fn reset_syscall_count(&self) {}
379 fn increment_dropped_upcall_count(&self) {}
380 fn get_dropped_upcall_count(&self) -> usize {
381 0
382 }
383 fn reset_dropped_upcall_count(&self) {}
384 fn increment_timeslice_expiration_count(&self) {}
385 fn get_timeslice_expiration_count(&self) -> usize {
386 0
387 }
388 fn reset_timeslice_expiration_count(&self) {}
389}
390
/// Entry that is stored in the grant pointer table at the top of process
/// memory.
///
/// One copy of this entry struct is stored per grant region defined in the
/// kernel. This type allows the core kernel to lookup a grant based on the
/// driver_num associated with the grant, and also holds the pointer to the
/// memory allocated for the particular grant.
///
/// `repr(C)` so the layout is fixed; the table lives in process memory that
/// the kernel manages directly.
#[repr(C)]
struct GrantPointerEntry {
    /// The syscall driver number associated with the allocated grant.
    ///
    /// This defaults to 0 if the grant has not been allocated. Note, however,
    /// that 0 is a valid driver_num, and therefore cannot be used to check if a
    /// grant is allocated or not; check `grant_ptr` for null instead.
    driver_num: usize,

    /// The start of the memory location where the grant has been allocated, or
    /// null if the grant has not been allocated.
    grant_ptr: *mut u8,
}
411
412/// A type for userspace processes in Tock.
413///
414/// As its name implies, this is the standard implementation for Tock processes
415/// that exposes the full support for processes running on embedded hardware.
416///
417/// [`ProcessStandard`] is templated on two parameters:
418///
419/// - `C`: [`Chip`]: The implementation must know the [`Chip`] the kernel is
420/// running on to properly store architecture-specific and MPU state for the
421/// process.
422/// - `D`: [`ProcessStandardDebug`]: This configures the debugging mechanism the
423/// process uses for storing optional debugging data. Kernels that do not wish
424/// to store per-process debugging state can use the `()` type for this
425/// parameter.
426pub struct ProcessStandard<'a, C: 'static + Chip, D: 'static + ProcessStandardDebug + Default> {
427 /// Identifier of this process and the index of the process in the process
428 /// table.
429 process_id: Cell<ProcessId>,
430
431 /// An application ShortId, generated from process loading and
432 /// checking, which denotes the security identity of this process.
433 app_id: ShortId,
434
435 /// Pointer to the main Kernel struct.
436 kernel: &'static Kernel,
437
438 /// Pointer to the struct that defines the actual chip the kernel is running
439 /// on. This is used because processes have subtle hardware-based
440 /// differences. Specifically, the actual syscall interface and how
441 /// processes are switched to is architecture-specific, and how memory must
442 /// be allocated for memory protection units is also hardware-specific.
443 chip: &'static C,
444
445 /// Application memory layout:
446 ///
447 /// ```text
448 /// ╒════════ ← memory_start + memory_len
449 /// ╔═ │ Grant Pointers
450 /// ║ │ ──────
451 /// │ Process Control Block
452 /// D │ ──────
453 /// Y │ Grant Regions
454 /// N │
455 /// A │ ↓
456 /// M │ ────── ← kernel_memory_break
457 /// I │
458 /// C │ ────── ← app_break ═╗
459 /// │ ║
460 /// ║ │ ↑ A
461 /// ║ │ Heap P C
462 /// ╠═ │ ────── ← app_heap_start R C
463 /// │ Data O E
464 /// F │ ────── ← data_start_pointer C S
465 /// I │ Stack E S
466 /// X │ ↓ S I
467 /// E │ S B
468 /// D │ ────── ← current_stack_pointer L
469 /// │ ║ E
470 /// ╚═ ╘════════ ← memory_start ═╝
471 /// ```
472 ///
473 /// The start of process memory. We store this as a pointer and length and
474 /// not a slice due to Rust aliasing rules. If we were to store a slice,
475 /// then any time another slice to the same memory or an ProcessBuffer is
476 /// used in the kernel would be undefined behavior.
477 memory_start: *const u8,
478 /// Number of bytes of memory allocated to this process.
479 memory_len: usize,
480
481 /// Reference to the slice of `GrantPointerEntry`s stored in the process's
482 /// memory reserved for the kernel. These driver numbers are zero and
483 /// pointers are null if the grant region has not been allocated. When the
484 /// grant region is allocated these pointers are updated to point to the
485 /// allocated memory and the driver number is set to match the driver that
486 /// owns the grant. No other reference to these pointers exists in the Tock
487 /// kernel.
488 grant_pointers: MapCell<&'static mut [GrantPointerEntry]>,
489
490 /// Pointer to the end of the allocated (and MPU protected) grant region.
491 kernel_memory_break: Cell<*const u8>,
492
493 /// Pointer to the end of process RAM that has been sbrk'd to the process.
494 app_break: Cell<*const u8>,
495
496 /// Pointer to high water mark for process buffers shared through `allow`
497 allow_high_water_mark: Cell<*const u8>,
498
499 /// Process flash segment. This is the region of nonvolatile flash that
500 /// the process occupies.
501 flash: &'static [u8],
502
503 /// The footers of the process binary (may be zero-sized), which are metadata
504 /// about the process not covered by integrity. Used, among other things, to
505 /// store signatures.
506 footers: &'static [u8],
507
508 /// Collection of pointers to the TBF header in flash.
509 header: tock_tbf::types::TbfHeader<'static>,
510
511 /// Credential that was approved for this process, or `None` if the
512 /// credential was permitted to run without an accepted credential.
513 credential: Option<AcceptedCredential>,
514
515 /// State saved on behalf of the process each time the app switches to the
516 /// kernel.
517 stored_state:
518 MapCell<<<C as Chip>::UserspaceKernelBoundary as UserspaceKernelBoundary>::StoredState>,
519
520 /// The current state of the app. The scheduler uses this to determine
521 /// whether it can schedule this app to execute.
522 ///
523 /// The `state` is used both for bookkeeping for the scheduler as well as
524 /// for enabling control by other parts of the system. The scheduler keeps
525 /// track of if a process is ready to run or not by switching between the
526 /// `Running` and `Yielded` states. The system can control the process by
527 /// switching it to a "stopped" state to prevent the scheduler from
528 /// scheduling it.
529 state: Cell<State>,
530
531 /// How to respond if this process faults.
532 fault_policy: &'a dyn ProcessFaultPolicy,
533
534 /// Storage permissions for this process.
535 storage_permissions: StoragePermissions,
536
537 /// Configuration data for the MPU
538 mpu_config: MapCell<<<C as Chip>::MPU as MPU>::MpuConfig>,
539
540 /// MPU regions are saved as a pointer-size pair.
541 mpu_regions: [Cell<Option<mpu::Region>>; 6],
542
543 /// Essentially a list of upcalls that want to call functions in the
544 /// process.
545 tasks: MapCell<RingBuffer<'a, Task>>,
546
547 /// Count of how many times this process has entered the fault condition and
548 /// been restarted. This is used by some `ProcessRestartPolicy`s to
549 /// determine if the process should be restarted or not.
550 restart_count: Cell<usize>,
551
552 /// The completion code set by the process when it last exited, restarted,
553 /// or was terminated. If the process is has never terminated, then the
554 /// `OptionalCell` will be empty (i.e. `None`). If the process has exited,
555 /// restarted, or terminated, the `OptionalCell` will contain an optional 32
556 /// bit value. The option will be `None` if the process crashed or was
557 /// stopped by the kernel and there is no provided completion code. If the
558 /// process called the exit syscall then the provided completion code will
559 /// be stored as `Some(completion code)`.
560 completion_code: OptionalCell<Option<u32>>,
561
562 /// Flag that stores whether this process has a task that is ready when
563 /// the process is in the [`State::YieldedFor`] state.
564 is_yield_wait_for_ready: Cell<bool>,
565
566 /// Values kept so that we can print useful debug messages when apps fault.
567 debug: D,
568}
569
570impl<C: Chip, D: 'static + ProcessStandardDebug> Process for ProcessStandard<'_, C, D> {
    /// Return this process's `ProcessId`, which also encodes the index of the
    /// process in the process table.
    fn processid(&self) -> ProcessId {
        self.process_id.get()
    }
574
    /// Return the `ShortId` assigned during process loading and checking,
    /// which denotes the security identity of this process.
    fn short_app_id(&self) -> ShortId {
        self.app_id
    }
578
579 fn binary_version(&self) -> Option<BinaryVersion> {
580 let version = self.header.get_binary_version();
581 match NonZeroU32::new(version) {
582 Some(version_nonzero) => Some(BinaryVersion::new(version_nonzero)),
583 None => None,
584 }
585 }
586
    /// Return the credential that was approved for this process, or `None` if
    /// the process was permitted to run without an accepted credential.
    fn get_credential(&self) -> Option<AcceptedCredential> {
        self.credential
    }
590
591 fn enqueue_task(&self, task: Task) -> Result<(), ErrorCode> {
592 // If this app is in a `Fault` state then we shouldn't schedule
593 // any work for it.
594 if !self.is_running() {
595 return Err(ErrorCode::NODEVICE);
596 }
597
598 let ret = self.tasks.map_or(Err(ErrorCode::FAIL), |tasks| {
599 match tasks.enqueue(task) {
600 true => {
601 // If the process is yielded-for this task, set the ready flag.
602 if let State::YieldedFor(yielded_upcall_id) = self.state.get() {
603 if let Some(upcall_id) = match task {
604 Task::FunctionCall(FunctionCall {
605 source: FunctionCallSource::Driver(upcall_id),
606 ..
607 }) => Some(upcall_id),
608 Task::ReturnValue(ReturnArguments { upcall_id, .. }) => Some(upcall_id),
609 _ => None,
610 } {
611 self.is_yield_wait_for_ready
612 .set(upcall_id == yielded_upcall_id);
613 }
614 }
615 // The task has been successfully enqueued.
616 Ok(())
617 }
618 false => {
619 // The task could not be enqueued as there is
620 // insufficient space in the ring buffer.
621 Err(ErrorCode::NOMEM)
622 }
623 }
624 });
625
626 if ret.is_err() {
627 // On any error we were unable to enqueue the task. Record the
628 // error, but importantly do _not_ increment kernel work.
629 self.debug.increment_dropped_upcall_count();
630 }
631
632 ret
633 }
634
    fn ready(&self) -> bool {
        match self.state.get() {
            // A running process can always be scheduled.
            State::Running => true,
            // A yielded-for process is ready only once the specific upcall it
            // waits on has been queued (flag maintained by `enqueue_task()`
            // and `set_yielded_for_state()`).
            State::YieldedFor(_) => self.is_yield_wait_for_ready.get(),
            // A yielded process is ready if any task is queued for it.
            State::Yielded => self.tasks.map_or(false, |ring_buf| ring_buf.has_elements()),
            // Stopped, faulted, or terminated processes are never ready.
            _ => false,
        }
    }
643
644 fn remove_pending_upcalls(&self, upcall_id: UpcallId) -> usize {
645 self.tasks.map_or(0, |tasks| {
646 let count_before = tasks.len();
647 tasks.retain(|task| match task {
648 // Remove only tasks that are function calls with an id equal
649 // to `upcall_id`.
650 Task::FunctionCall(function_call) => match function_call.source {
651 FunctionCallSource::Kernel => true,
652 FunctionCallSource::Driver(id) => id != upcall_id,
653 },
654 _ => true,
655 });
656 let count_after = tasks.len();
657 if config::CONFIG.trace_syscalls {
658 debug!(
659 "[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed",
660 self.processid(),
661 upcall_id.driver_num,
662 upcall_id.subscribe_num,
663 count_before - count_after,
664 );
665 }
666 count_before - count_after
667 })
668 }
669
670 fn is_running(&self) -> bool {
671 match self.state.get() {
672 State::Running | State::Yielded | State::YieldedFor(_) | State::Stopped(_) => true,
673 _ => false,
674 }
675 }
676
    /// Return the current scheduling state of the process.
    fn get_state(&self) -> State {
        self.state.get()
    }
680
    fn set_yielded_state(&self) {
        // Only a `Running` process can yield; any other state is left as-is.
        if self.state.get() == State::Running {
            self.state.set(State::Yielded);
        }
    }
686
687 fn set_yielded_for_state(&self, upcall_id: UpcallId) {
688 if self.state.get() == State::Running {
689 self.state.set(State::YieldedFor(upcall_id));
690
691 // Verify if the process has a task that this yield waits for
692 self.is_yield_wait_for_ready
693 .set(self.tasks.map_or(false, |tasks| {
694 tasks
695 .find_first_matching(|task| match task {
696 Task::ReturnValue(ReturnArguments { upcall_id: id, .. }) => {
697 upcall_id == *id
698 }
699 Task::FunctionCall(FunctionCall {
700 source: FunctionCallSource::Driver(id),
701 ..
702 }) => upcall_id == *id,
703 _ => false,
704 })
705 .is_some()
706 }));
707 }
708 }
709
710 fn stop(&self) {
711 match self.state.get() {
712 State::Running => self.state.set(State::Stopped(StoppedState::Running)),
713 State::Yielded => self.state.set(State::Stopped(StoppedState::Yielded)),
714 State::YieldedFor(upcall_id) => self
715 .state
716 .set(State::Stopped(StoppedState::YieldedFor(upcall_id))),
717 State::Stopped(_stopped_state) => {
718 // Already stopped, nothing to do.
719 }
720 State::Faulted | State::Terminated => {
721 // Stop has no meaning on a inactive process.
722 }
723 }
724 }
725
726 fn resume(&self) {
727 if let State::Stopped(stopped_state) = self.state.get() {
728 match stopped_state {
729 StoppedState::Running => self.state.set(State::Running),
730 StoppedState::Yielded => self.state.set(State::Yielded),
731 StoppedState::YieldedFor(upcall_id) => self.set_yielded_for_state(upcall_id),
732 }
733 }
734 }
735
    /// Handle a fault in this process according to the configured per-process
    /// fault policy: panic the kernel, restart the process, or stop it.
    fn set_fault_state(&self) {
        // Use the per-process fault policy to determine what action the kernel
        // should take since the process faulted.
        let action = self.fault_policy.action(self);
        match action {
            FaultAction::Panic => {
                // process faulted. Panic and print status
                self.state.set(State::Faulted);
                panic!("Process {} had a fault", self.get_process_name());
            }
            FaultAction::Restart => {
                self.try_restart(None);
            }
            FaultAction::Stop => {
                // This looks a lot like restart, except we just leave the app
                // how it faulted and mark it as `Faulted`. By clearing
                // all of the app's todo work it will not be scheduled, and
                // clearing all of the grant regions will cause capsules to drop
                // this app as well.
                self.terminate(None);
                self.state.set(State::Faulted);
            }
        }
    }
760
761 fn start(&self, _cap: &dyn crate::capabilities::ProcessStartCapability) {
762 // `start()` can only be called on a terminated process.
763 if self.get_state() != State::Terminated {
764 return;
765 }
766
767 // Reset to start the process.
768 if let Ok(()) = self.reset() {
769 self.state.set(State::Yielded);
770 }
771 }
772
773 fn try_restart(&self, completion_code: Option<u32>) {
774 // `try_restart()` cannot be called if the process is terminated. Only
775 // `start()` can start a terminated process.
776 if self.get_state() == State::Terminated {
777 return;
778 }
779
780 // Terminate the process, freeing its state and removing any
781 // pending tasks from the scheduler's queue.
782 self.terminate(completion_code);
783
784 // If there is a kernel policy that controls restarts, it should be
785 // implemented here. For now, always restart.
786 if let Ok(()) = self.reset() {
787 self.state.set(State::Yielded);
788 }
789
790 // Decide what to do with res later. E.g., if we can't restart
791 // want to reclaim the process resources.
792 }
793
    fn terminate(&self, completion_code: Option<u32>) {
        // A process can be terminated if it is running or in the `Faulted`
        // state. Otherwise, you cannot terminate it and this method returns
        // early.
        //
        // The kernel can terminate in the `Faulted` state to return the process
        // to a state in which it can run again (e.g., reset it).
        if !self.is_running() && self.get_state() != State::Faulted {
            return;
        }

        // Remove all queued tasks so no pending upcalls run after termination.
        self.tasks.map(|tasks| {
            tasks.empty();
        });

        // Clear any grant regions this app has setup with any capsules.
        unsafe {
            self.grant_ptrs_reset();
        }

        // Save the completion code.
        self.completion_code.set(completion_code);

        // Mark the app as stopped so the scheduler won't try to run it.
        // (Done last, after all cleanup above.)
        self.state.set(State::Terminated);
    }
821
    /// Number of times this process has faulted and been restarted.
    fn get_restart_count(&self) -> usize {
        self.restart_count.get()
    }
825
    /// Whether any tasks are currently queued for this process.
    fn has_tasks(&self) -> bool {
        self.tasks.map_or(false, |tasks| tasks.has_elements())
    }
829
    /// Remove and return the next queued task, or `None` if the queue is
    /// empty (or the task queue is unavailable).
    fn dequeue_task(&self) -> Option<Task> {
        self.tasks.map_or(None, |tasks| tasks.dequeue())
    }
833
834 fn remove_upcall(&self, upcall_id: UpcallId) -> Option<Task> {
835 self.tasks.map_or(None, |tasks| {
836 tasks.remove_first_matching(|task| match task {
837 Task::FunctionCall(fc) => match fc.source {
838 FunctionCallSource::Driver(upid) => upid == upcall_id,
839 _ => false,
840 },
841 Task::ReturnValue(rv) => rv.upcall_id == upcall_id,
842 Task::IPC(_) => false,
843 })
844 })
845 }
846
    /// Number of tasks currently queued for this process.
    fn pending_tasks(&self) -> usize {
        self.tasks.map_or(0, |tasks| tasks.len())
    }
850
    /// Look up command permissions for `driver_num` at `offset`, as declared
    /// in this process's TBF header.
    fn get_command_permissions(&self, driver_num: usize, offset: usize) -> CommandPermissions {
        self.header.get_command_permissions(driver_num, offset)
    }
854
    /// Return the storage permissions assigned to this process.
    fn get_storage_permissions(&self) -> StoragePermissions {
        self.storage_permissions
    }
858
    /// Number of writeable flash regions declared in the TBF header.
    fn number_writeable_flash_regions(&self) -> usize {
        self.header.number_writeable_flash_regions()
    }
862
    /// Return the writeable flash region at `region_index` from the TBF
    /// header, as an (offset, size) pair.
    fn get_writeable_flash_region(&self, region_index: usize) -> (usize, usize) {
        self.header.get_writeable_flash_region(region_index)
    }
866
867 fn update_stack_start_pointer(&self, stack_pointer: *const u8) {
868 if stack_pointer >= self.mem_start() && stack_pointer < self.mem_end() {
869 self.debug.set_app_stack_start_pointer(stack_pointer);
870 // We also reset the minimum stack pointer because whatever
871 // value we had could be entirely wrong by now.
872 self.debug.set_app_stack_min_pointer(stack_pointer);
873 }
874 }
875
876 fn update_heap_start_pointer(&self, heap_pointer: *const u8) {
877 if heap_pointer >= self.mem_start() && heap_pointer < self.mem_end() {
878 self.debug.set_app_heap_start_pointer(heap_pointer);
879 }
880 }
881
    /// Program the hardware MPU with this process's stored configuration.
    fn setup_mpu(&self) {
        self.mpu_config.map(|config| {
            // # Safety
            //
            // `configure_mpu` is unsafe, as invoking it with an incorrect
            // configuration can allow an untrusted application to access
            // kernel-private memory.
            //
            // This call is safe given we trust that the implementation of
            // `ProcessStandard` correctly provisions a set of MPU regions that
            // does not grant access to any kernel-private memory, and
            // `ProcessStandard` does not provide safe, publicly accessible
            // APIs to add other arbitrary MPU regions to this configuration.
            unsafe {
                self.chip.mpu().configure_mpu(config);
            }
        });
    }
900
901 fn add_mpu_region(
902 &self,
903 unallocated_memory_start: *const u8,
904 unallocated_memory_size: usize,
905 min_region_size: usize,
906 ) -> Option<mpu::Region> {
907 self.mpu_config.and_then(|config| {
908 let new_region = self.chip.mpu().allocate_region(
909 unallocated_memory_start,
910 unallocated_memory_size,
911 min_region_size,
912 mpu::Permissions::ReadWriteOnly,
913 config,
914 )?;
915
916 for region in self.mpu_regions.iter() {
917 if region.get().is_none() {
918 region.set(Some(new_region));
919 return Some(new_region);
920 }
921 }
922
923 // Not enough room in Process struct to store the MPU region.
924 None
925 })
926 }
927
928 fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode> {
929 self.mpu_config.map_or(Err(ErrorCode::INVAL), |config| {
930 // Find the existing mpu region that we are removing; it needs to match exactly.
931 if let Some(internal_region) = self.mpu_regions.iter().find(|r| r.get() == Some(region))
932 {
933 self.chip
934 .mpu()
935 .remove_memory_region(region, config)
936 .or(Err(ErrorCode::FAIL))?;
937
938 // Remove this region from the tracking cache of mpu_regions
939 internal_region.set(None);
940 Ok(())
941 } else {
942 Err(ErrorCode::INVAL)
943 }
944 })
945 }
946
    /// Move the process break by a relative `increment`, delegating all
    /// bounds and MPU validation to `brk`.
    fn sbrk(&self, increment: isize) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // `wrapping_offset` avoids UB on arithmetic overflow; an
        // out-of-range result is rejected by `brk`'s bounds checks.
        let new_break = self.app_break.get().wrapping_offset(increment);
        self.brk(new_break)
    }
956
    /// Set the process break (end of process-accessible memory) to
    /// `new_break`, updating the MPU accordingly. On success, returns a
    /// `CapabilityPtr` for the old break with authority over the
    /// process-accessible region.
    fn brk(&self, new_break: *const u8) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        self.mpu_config.map_or(Err(Error::KernelError), |config| {
            // Reject a break below the allow high-water mark (memory the
            // process has shared with the kernel) or at/after the end of
            // process memory.
            if new_break < self.allow_high_water_mark.get() || new_break >= self.mem_end() {
                Err(Error::AddressOutOfBounds)
            } else if new_break > self.kernel_memory_break.get() {
                // The break would collide with kernel-owned memory.
                Err(Error::OutOfMemory)
            } else if let Err(()) = self.chip.mpu().update_app_memory_region(
                new_break,
                self.kernel_memory_break.get(),
                mpu::Permissions::ReadWriteOnly,
                config,
            ) {
                Err(Error::OutOfMemory)
            } else {
                let old_break = self.app_break.get();
                self.app_break.set(new_break);

                // # Safety
                //
                // `configure_mpu` is unsafe, as invoking it with an incorrect
                // configuration can allow an untrusted application to access
                // kernel-private memory.
                //
                // This call is safe given we trust that the implementation of
                // `ProcessStandard` correctly provisions a set of MPU regions
                // that does not grant access to any kernel-private memory, and
                // `ProcessStandard` does not provide safe, publicly
                // accessible APIs to add other arbitrary MPU regions to this
                // configuration.
                unsafe {
                    self.chip.mpu().configure_mpu(config);
                }

                // Hand back the old break with authority over
                // [mem_start, new_break).
                let base = self.mem_start() as usize;
                let break_result = unsafe {
                    CapabilityPtr::new_with_authority(
                        old_break as *const (),
                        base,
                        (new_break as usize) - base,
                        CapabilityPtrPermissions::ReadWrite,
                    )
                };

                Ok(break_result)
            }
        })
    }
1009
    /// Validate a userspace-provided pointer/length pair and wrap it as a
    /// `ReadWriteProcessBuffer`. A zero-length buffer is always accepted
    /// (used by processes to revoke a previous allow); otherwise the span
    /// must lie entirely within process-owned RAM.
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readwrite_process_buffer(
        &self,
        buf_start_addr: *mut u8,
        size: usize,
    ) -> Result<ReadWriteProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // as to revoke kernel access to a memory region without granting access
        // to another one
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of length
            // 0 will never allow dereferencing any memory in a safe manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadWriteProcessBuffer` will handle any safety issues.
            // Therefore, we can encapsulate the unsafe.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size) {
            // TODO: Check for buffer aliasing here

            // Valid buffer, we need to adjust the app's watermark
            // note: `in_app_owned_memory` ensures this offset does not wrap
            let buf_end_addr = buf_start_addr.wrapping_add(size);
            let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
            self.allow_high_water_mark.set(new_water_mark);

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified using
            // `in_app_owned_memory`) and respect alignment and other
            // constraints of the Rust references created by
            // `ReadWriteProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadWriteProcessBuffer` will
            // be the only reference to this memory.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }
1075
    /// Validate a userspace-provided pointer/length pair and wrap it as a
    /// `ReadOnlyProcessBuffer`. A zero-length buffer is always accepted
    /// (used to revoke a previous allow); otherwise the span must lie in
    /// process-owned RAM or in the process's flash. Only RAM buffers bump
    /// the allow high-water mark, since flash is not affected by `brk`.
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readonly_process_buffer(
        &self,
        buf_start_addr: *const u8,
        size: usize,
    ) -> Result<ReadOnlyProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // as to revoke kernel access to a memory region without granting access
        // to another one
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of length
            // 0 will never allow dereferencing any memory in a safe manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadOnlyProcessBuffer` will handle any safety issues. Therefore,
            // we can encapsulate the unsafe.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size)
            || self.in_app_flash_memory(buf_start_addr, size)
        {
            // TODO: Check for buffer aliasing here

            if self.in_app_owned_memory(buf_start_addr, size) {
                // Valid buffer, and since this is in read-write memory (i.e.
                // not flash), we need to adjust the process's watermark. Note:
                // `in_app_owned_memory()` ensures this offset does not wrap.
                let buf_end_addr = buf_start_addr.wrapping_add(size);
                let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
                self.allow_high_water_mark.set(new_water_mark);
            }

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified using
            // `in_app_owned_memory` or `in_app_flash_memory`) and respect
            // alignment and other constraints of the Rust references created by
            // `ReadWriteProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadOnlyProcessBuffer` will
            // be the only reference to this memory.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }
1146
    /// Write a single byte into process memory, returning `false` (without
    /// writing) if `addr` is not within process-owned RAM.
    ///
    /// # Safety
    ///
    /// The caller must ensure nothing else holds a Rust reference to this
    /// byte while the write occurs; otherwise the raw write is undefined
    /// behavior even though the address range is validated.
    unsafe fn set_byte(&self, addr: *mut u8, value: u8) -> bool {
        if self.in_app_owned_memory(addr, 1) {
            // We verify that this will only write process-accessible memory,
            // but this can still be undefined behavior if something else holds
            // a reference to this memory.
            *addr = value;
            true
        } else {
            false
        }
    }
1158
1159 fn grant_is_allocated(&self, grant_num: usize) -> Option<bool> {
1160 // Do not modify an inactive process.
1161 if !self.is_running() {
1162 return None;
1163 }
1164
1165 // Update the grant pointer to the address of the new allocation.
1166 self.grant_pointers.map_or(None, |grant_pointers| {
1167 // Implement `grant_pointers[grant_num]` without a chance of a
1168 // panic.
1169 grant_pointers
1170 .get(grant_num)
1171 .map(|grant_entry| !grant_entry.grant_ptr.is_null())
1172 })
1173 }
1174
    /// Allocate memory in this process's grant region for grant `grant_num`
    /// belonging to driver `driver_num`.
    ///
    /// Fails if the process is inactive, the grant number is invalid, the
    /// grant (or another grant for the same driver) is already allocated,
    /// or the grant region is out of memory.
    fn allocate_grant(
        &self,
        grant_num: usize,
        driver_num: usize,
        size: usize,
        align: usize,
    ) -> Result<(), ()> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(());
        }

        // Verify the grant_num is valid.
        if grant_num >= self.kernel.get_grant_count_and_finalize() {
            return Err(());
        }

        // Verify that the grant is not already allocated. If the pointer is not
        // null then the grant is already allocated.
        if let Some(is_allocated) = self.grant_is_allocated(grant_num) {
            if is_allocated {
                return Err(());
            }
        }

        // Verify that there is not already a grant allocated with the same
        // `driver_num`.
        let exists = self.grant_pointers.map_or(false, |grant_pointers| {
            // Check our list of grant pointers if the driver number is used.
            grant_pointers.iter().any(|grant_entry| {
                // Check if the grant is both allocated (its grant pointer is
                // non null) and the driver number matches.
                (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
            })
        });
        // If we find a match, then the `driver_num` must already be used and
        // the grant allocation fails.
        if exists {
            return Err(());
        }

        // Use the shared grant allocator function to actually allocate memory.
        // Returns `None` if the allocation cannot be created.
        if let Some(grant_ptr) = self.allocate_in_grant_region_internal(size, align) {
            // Update the grant pointer to the address of the new allocation.
            self.grant_pointers.map_or(Err(()), |grant_pointers| {
                // Implement `grant_pointers[grant_num] = grant_ptr` without a
                // chance of a panic.
                grant_pointers
                    .get_mut(grant_num)
                    .map_or(Err(()), |grant_entry| {
                        // Actually set the driver num and grant pointer.
                        grant_entry.driver_num = driver_num;
                        grant_entry.grant_ptr = grant_ptr.as_ptr();

                        // If all of this worked, report success.
                        Ok(())
                    })
            })
        } else {
            // Could not allocate the memory for the grant region.
            Err(())
        }
    }
1239
1240 fn allocate_custom_grant(
1241 &self,
1242 size: usize,
1243 align: usize,
1244 ) -> Result<(ProcessCustomGrantIdentifier, NonNull<u8>), ()> {
1245 // Do not modify an inactive process.
1246 if !self.is_running() {
1247 return Err(());
1248 }
1249
1250 // Use the shared grant allocator function to actually allocate memory.
1251 // Returns `None` if the allocation cannot be created.
1252 if let Some(ptr) = self.allocate_in_grant_region_internal(size, align) {
1253 // Create the identifier that the caller will use to get access to
1254 // this custom grant in the future.
1255 let identifier = self.create_custom_grant_identifier(ptr);
1256
1257 Ok((identifier, ptr))
1258 } else {
1259 // Could not allocate memory for the custom grant.
1260 Err(())
1261 }
1262 }
1263
    /// Enter the grant numbered `grant_num`, returning a pointer to its
    /// allocation.
    ///
    /// Re-entrancy is prevented by tagging the stored grant pointer: the
    /// lowest bit is set while the grant is entered (grant allocations are
    /// aligned, so the bit is otherwise unused), and `leave_grant` clears
    /// it again.
    fn enter_grant(&self, grant_num: usize) -> Result<NonNull<u8>, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Retrieve the grant pointer from the `grant_pointers` slice. We use
        // `[slice].get()` so that if the grant number is invalid this will
        // return `Err` and not panic.
        self.grant_pointers
            .map_or(Err(Error::KernelError), |grant_pointers| {
                // Implement `grant_pointers[grant_num]` without a chance of a
                // panic.
                match grant_pointers.get_mut(grant_num) {
                    Some(grant_entry) => {
                        // Get a copy of the actual grant pointer.
                        let grant_ptr = grant_entry.grant_ptr;

                        // Check if the grant pointer is marked that the grant
                        // has already been entered. If so, return an error.
                        if (grant_ptr as usize) & 0x1 == 0x1 {
                            // Lowest bit is one, meaning this grant has been
                            // entered.
                            Err(Error::AlreadyInUse)
                        } else {
                            // Now, to mark that the grant has been entered, we
                            // set the lowest bit to one and save this as the
                            // grant pointer.
                            grant_entry.grant_ptr = (grant_ptr as usize | 0x1) as *mut u8;

                            // And we return the grant pointer to the entered
                            // grant.
                            Ok(unsafe { NonNull::new_unchecked(grant_ptr) })
                        }
                    }
                    None => Err(Error::AddressOutOfBounds),
                }
            })
    }
1303
    /// Get a raw pointer to the custom grant described by `identifier`.
    ///
    /// Unlike `enter_grant` there is no re-entrancy tagging here; the
    /// address is derived directly from the identifier.
    fn enter_custom_grant(
        &self,
        identifier: ProcessCustomGrantIdentifier,
    ) -> Result<*mut u8, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Get the address of the custom grant based on the identifier.
        let custom_grant_address = self.get_custom_grant_address(identifier);

        // We never deallocate custom grants and only we can change the
        // `identifier` so we know this is a valid address for the custom grant.
        Ok(custom_grant_address as *mut u8)
    }
1320
    /// Mark the grant numbered `grant_num` as no longer entered by clearing
    /// the "entered" tag (lowest bit) on its stored pointer, making it
    /// enterable again. Counterpart to `enter_grant`.
    unsafe fn leave_grant(&self, grant_num: usize) {
        // Do not modify an inactive process.
        if !self.is_running() {
            return;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Implement `grant_pointers[grant_num]` without a chance of a
            // panic.
            if let Some(grant_entry) = grant_pointers.get_mut(grant_num) {
                // Get a copy of the actual grant pointer.
                let grant_ptr = grant_entry.grant_ptr;

                // Now, to mark that the grant has been released, we set the
                // lowest bit back to zero and save this as the grant
                // pointer.
                grant_entry.grant_ptr = (grant_ptr as usize & !0x1) as *mut u8;
            }
        });
    }
1341
    /// Number of grants currently allocated for this process, or `None` if
    /// the process is inactive.
    fn grant_allocated_count(&self) -> Option<usize> {
        // Do not inspect an inactive process.
        if !self.is_running() {
            return None;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Filter our list of grant pointers into just the non-null ones,
            // and count those. A grant is allocated if its grant pointer is
            // non-null.
            grant_pointers
                .iter()
                .filter(|grant_entry| !grant_entry.grant_ptr.is_null())
                .count()
        })
    }
1358
1359 fn lookup_grant_from_driver_num(&self, driver_num: usize) -> Result<usize, Error> {
1360 self.grant_pointers
1361 .map_or(Err(Error::KernelError), |grant_pointers| {
1362 // Filter our list of grant pointers into just the non null
1363 // ones, and count those. A grant is allocated if its grant
1364 // pointer is non-null.
1365 match grant_pointers.iter().position(|grant_entry| {
1366 // Only consider allocated grants.
1367 (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
1368 }) {
1369 Some(idx) => Ok(idx),
1370 None => Err(Error::OutOfMemory),
1371 }
1372 })
1373 }
1374
1375 fn is_valid_upcall_function_pointer(&self, upcall_fn: *const ()) -> bool {
1376 let ptr = upcall_fn as *const u8;
1377 let size = mem::size_of::<*const u8>();
1378
1379 // It is okay if this function is in memory or flash.
1380 self.in_app_flash_memory(ptr, size) || self.in_app_owned_memory(ptr, size)
1381 }
1382
    /// The package name from the TBF header, or `""` if none was set.
    fn get_process_name(&self) -> &'static str {
        self.header.get_package_name().unwrap_or("")
    }

    /// The saved completion code, if the process has terminated.
    /// Outer `Option`: whether the process exited; inner `Option`: whether
    /// it supplied a completion code.
    fn get_completion_code(&self) -> Option<Option<u32>> {
        self.completion_code.get()
    }
1390
    /// Write a syscall return value into the process's stored context via
    /// the userspace/kernel boundary (UKB) and mark the process runnable.
    /// Faults the process if the return value cannot be stored.
    fn set_syscall_return_value(&self, return_value: SyscallReturn) {
        match self.stored_state.map(|stored_state| unsafe {
            // Actually set the return value for a particular process.
            //
            // The UKB implementation uses the bounds of process-accessible
            // memory to verify that any memory changes are valid. Here, the
            // unsafe promise we are making is that the bounds passed to the UKB
            // are correct.
            self.chip
                .userspace_kernel_boundary()
                .set_syscall_return_value(
                    self.mem_start(),
                    self.app_break.get(),
                    stored_state,
                    return_value,
                )
        }) {
            Some(Ok(())) => {
                // If we get an `Ok` we are all set.

                // The process is either already in the running state (having
                // just called a nonblocking syscall like command) or needs to
                // be moved to the running state having called Yield-WaitFor and
                // now needing to be resumed. Either way we can set the state to
                // running.
                self.state.set(State::Running);
                // The task is running, if it was yielded-for an upcall,
                // the upcall must have been scheduled, unset
                // the ready flag.
                self.is_yield_wait_for_ready.set(false);
            }

            Some(Err(())) => {
                // If we get an `Err`, then the UKB implementation could not set
                // the return value, likely because the process's stack is no
                // longer accessible to it. All we can do is fault.
                self.set_fault_state();
            }

            None => {
                // We should never be here since `stored_state` should always be
                // occupied.
                self.set_fault_state();
            }
        }
    }
1437
    /// Queue `callback` to run in userspace by asking the UKB to set up the
    /// process's context, then mark the process runnable. Faults the
    /// process if the context cannot be set up (e.g., insufficient stack).
    fn set_process_function(&self, callback: FunctionCall) {
        // See if we can actually enqueue this function for this process.
        // Architecture-specific code handles actually doing this since the
        // exact method is both architecture- and implementation-specific.
        //
        // This can fail, for example if the process does not have enough memory
        // remaining.
        match self.stored_state.map(|stored_state| {
            // Let the UKB implementation handle setting the process's PC so
            // that the process executes the upcall function. We encapsulate
            // unsafe here because we are guaranteeing that the memory bounds
            // passed to `set_process_function` are correct.
            unsafe {
                self.chip.userspace_kernel_boundary().set_process_function(
                    self.mem_start(),
                    self.app_break.get(),
                    stored_state,
                    callback,
                )
            }
        }) {
            Some(Ok(())) => {
                // If we got an `Ok` we are all set and should mark that this
                // process is ready to be scheduled.

                // Move this process to the "running" state so the scheduler
                // will schedule it.
                self.state.set(State::Running);
            }

            Some(Err(())) => {
                // If we got an Error, then there was likely not enough room on
                // the stack to allow the process to execute this function given
                // the details of the particular architecture this is running
                // on. This process has essentially faulted, so we mark it as
                // such.
                self.set_fault_state();
            }

            None => {
                // We should never be here since `stored_state` should always be
                // occupied.
                self.set_fault_state();
            }
        }
    }
1484
    /// Context-switch into this process via the UKB, returning why control
    /// came back to the kernel (or `None` if the process is not runnable or
    /// its stored state is unavailable).
    fn switch_to(&self) -> Option<syscall::ContextSwitchReason> {
        // Cannot switch to an invalid process
        if !self.is_running() {
            return None;
        }

        let (switch_reason, stack_pointer) =
            self.stored_state.map_or((None, None), |stored_state| {
                // Switch to the process. We guarantee that the memory pointers
                // we pass are valid, ensuring this context switch is safe.
                // Therefore we encapsulate the `unsafe`.
                unsafe {
                    let (switch_reason, optional_stack_pointer) = self
                        .chip
                        .userspace_kernel_boundary()
                        .switch_to_process(self.mem_start(), self.app_break.get(), stored_state);
                    (Some(switch_reason), optional_stack_pointer)
                }
            });

        // If the UKB implementation passed us a stack pointer, update our
        // debugging state. This is completely optional.
        if let Some(sp) = stack_pointer {
            self.debug.set_new_app_stack_min_pointer(sp);
        }

        switch_reason
    }
1513
    /// Number of syscalls recorded for this process.
    fn debug_syscall_count(&self) -> usize {
        self.debug.get_syscall_count()
    }

    /// Number of dropped upcalls recorded for this process.
    fn debug_dropped_upcall_count(&self) -> usize {
        self.debug.get_dropped_upcall_count()
    }

    /// Number of timeslice expirations recorded for this process.
    fn debug_timeslice_expiration_count(&self) -> usize {
        self.debug.get_timeslice_expiration_count()
    }

    /// Record one timeslice expiration for this process.
    fn debug_timeslice_expired(&self) {
        self.debug.increment_timeslice_expiration_count();
    }

    /// Record a syscall invocation and remember it as the most recent one.
    fn debug_syscall_called(&self, last_syscall: Syscall) {
        self.debug.increment_syscall_count();
        self.debug.set_last_syscall(last_syscall);
    }

    /// The most recently recorded syscall, if any.
    fn debug_syscall_last(&self) -> Option<Syscall> {
        self.debug.get_last_syscall()
    }
1538
    /// Snapshot the key flash and SRAM addresses for this process.
    fn get_addresses(&self) -> ProcessAddresses {
        ProcessAddresses {
            flash_start: self.flash_start() as usize,
            flash_non_protected_start: self.flash_non_protected_start() as usize,
            // End of the binary within flash, per the TBF header.
            flash_integrity_end: ((self.flash.as_ptr() as usize)
                + (self.header.get_binary_end() as usize))
                as *const u8,
            flash_end: self.flash_end() as usize,
            sram_start: self.mem_start() as usize,
            sram_app_brk: self.app_memory_break() as usize,
            sram_grant_start: self.kernel_memory_break() as usize,
            sram_end: self.mem_end() as usize,
            // Stack/heap pointers are tracked only for debugging and may be
            // unknown (`None`).
            sram_heap_start: self.debug.get_app_heap_start_pointer().map(|p| p as usize),
            sram_stack_top: self.debug.get_app_stack_start_pointer().map(|p| p as usize),
            sram_stack_bottom: self.debug.get_app_stack_min_pointer().map(|p| p as usize),
        }
    }
1556
    /// Sizes of the kernel-owned data structures stored in this process's
    /// memory region (grant pointer table, upcall queue, process struct).
    fn get_sizes(&self) -> ProcessSizes {
        ProcessSizes {
            grant_pointers: mem::size_of::<GrantPointerEntry>()
                * self.kernel.get_grant_count_and_finalize(),
            upcall_list: Self::CALLBACKS_OFFSET,
            process_control_block: Self::PROCESS_STRUCT_OFFSET,
        }
    }
1565
1566 fn print_full_process(&self, writer: &mut dyn Write) {
1567 if !config::CONFIG.debug_panics {
1568 return;
1569 }
1570
1571 self.stored_state.map(|stored_state| {
1572 // We guarantee the memory bounds pointers provided to the UKB are
1573 // correct.
1574 unsafe {
1575 self.chip.userspace_kernel_boundary().print_context(
1576 self.mem_start(),
1577 self.app_break.get(),
1578 stored_state,
1579 writer,
1580 );
1581 }
1582 });
1583
1584 // Display grant information.
1585 let number_grants = self.kernel.get_grant_count_and_finalize();
1586 let _ = writer.write_fmt(format_args!(
1587 "\
1588 \r\n Total number of grant regions defined: {}\r\n",
1589 self.kernel.get_grant_count_and_finalize()
1590 ));
1591 let rows = number_grants.div_ceil(3);
1592
1593 // Access our array of grant pointers.
1594 self.grant_pointers.map(|grant_pointers| {
1595 // Iterate each grant and show its address.
1596 for i in 0..rows {
1597 for j in 0..3 {
1598 let index = i + (rows * j);
1599 if index >= number_grants {
1600 break;
1601 }
1602
1603 // Implement `grant_pointers[grant_num]` without a chance of
1604 // a panic.
1605 grant_pointers.get(index).map(|grant_entry| {
1606 if grant_entry.grant_ptr.is_null() {
1607 let _ =
1608 writer.write_fmt(format_args!(" Grant {:>2} : -- ", index));
1609 } else {
1610 let _ = writer.write_fmt(format_args!(
1611 " Grant {:>2} {:#x}: {:p}",
1612 index, grant_entry.driver_num, grant_entry.grant_ptr
1613 ));
1614 }
1615 });
1616 }
1617 let _ = writer.write_fmt(format_args!("\r\n"));
1618 }
1619 });
1620
1621 // Display the current state of the MPU for this process.
1622 self.mpu_config.map(|config| {
1623 let _ = writer.write_fmt(format_args!("{}", config));
1624 });
1625
1626 // Print a helpful message on how to re-compile a process to view the
1627 // listing file. If a process is PIC, then we also need to print the
1628 // actual addresses the process executed at so that the .lst file can be
1629 // generated for those addresses. If the process was already compiled
1630 // for a fixed address, then just generating a .lst file is fine.
1631
1632 if self.debug.get_fixed_address_flash().is_some() {
1633 // Fixed addresses, can just run `make lst`.
1634 let _ = writer.write_fmt(format_args!(
1635 "\
1636 \r\nTo debug libtock-c apps, run `make lst` in the app's\
1637 \r\nfolder and open the arch.{:#x}.{:#x}.lst file.\r\n\r\n",
1638 self.debug.get_fixed_address_flash().unwrap_or(0),
1639 self.debug.get_fixed_address_ram().unwrap_or(0)
1640 ));
1641 } else {
1642 // PIC, need to specify the addresses.
1643 let sram_start = self.mem_start() as usize;
1644 let flash_start = self.flash.as_ptr() as usize;
1645 let flash_init_fn = flash_start + self.header.get_init_function_offset() as usize;
1646
1647 let _ = writer.write_fmt(format_args!(
1648 "\
1649 \r\nTo debug libtock-c apps, run\
1650 \r\n`make debug RAM_START={:#x} FLASH_INIT={:#x}`\
1651 \r\nin the app's folder and open the .lst file.\r\n\r\n",
1652 sram_start, flash_init_fn
1653 ));
1654 }
1655 }
1656
    /// Serialize the architecture-specific stored context into `out` via
    /// the UKB's `store_context`, forwarding its result. Returns
    /// `ErrorCode::FAIL` if the stored state cell is unavailable.
    fn get_stored_state(&self, out: &mut [u8]) -> Result<usize, ErrorCode> {
        self.stored_state
            .map(|stored_state| {
                self.chip
                    .userspace_kernel_boundary()
                    .store_context(stored_state, out)
            })
            .unwrap_or(Err(ErrorCode::FAIL))
    }
1666}
1667
1668impl<C: 'static + Chip, D: 'static + ProcessStandardDebug> ProcessStandard<'_, C, D> {
    // Number of slots in the per-process upcall (task) ring buffer.
    const CALLBACK_LEN: usize = 10;
    // Bytes of kernel-owned process memory reserved for the upcall ring
    // buffer.
    const CALLBACKS_OFFSET: usize = mem::size_of::<Task>() * Self::CALLBACK_LEN;

    // Bytes of kernel-owned process memory reserved for this process's
    // `ProcessStandard` metadata struct.
    const PROCESS_STRUCT_OFFSET: usize = mem::size_of::<ProcessStandard<C, D>>();
1675
1676 /// Create a `ProcessStandard` object based on the found `ProcessBinary`.
1677 pub(crate) unsafe fn create(
1678 kernel: &'static Kernel,
1679 chip: &'static C,
1680 pb: ProcessBinary,
1681 remaining_memory: *mut [u8],
1682 fault_policy: &'static dyn ProcessFaultPolicy,
1683 storage_permissions_policy: &'static dyn ProcessStandardStoragePermissionsPolicy<C, D>,
1684 app_id: ShortId,
1685 index: usize,
1686 ) -> Result<(Option<&'static dyn Process>, *mut [u8]), (ProcessLoadError, *mut [u8])> {
1687 let process_name = pb.header.get_package_name();
1688 let process_ram_requested_size = pb.header.get_minimum_app_ram_size() as usize;
1689
1690 // Initialize MPU region configuration.
1691 let mut mpu_config = match chip.mpu().new_config() {
1692 Some(mpu_config) => mpu_config,
1693 None => return Err((ProcessLoadError::MpuConfigurationError, remaining_memory)),
1694 };
1695
1696 // Allocate MPU region for flash.
1697 if chip
1698 .mpu()
1699 .allocate_region(
1700 pb.flash.as_ptr(),
1701 pb.flash.len(),
1702 pb.flash.len(),
1703 mpu::Permissions::ReadExecuteOnly,
1704 &mut mpu_config,
1705 )
1706 .is_none()
1707 {
1708 if config::CONFIG.debug_load_processes {
1709 debug!(
1710 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash",
1711 pb.flash.as_ptr() as usize,
1712 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1713 process_name
1714 );
1715 }
1716 return Err((ProcessLoadError::MpuInvalidFlashLength, remaining_memory));
1717 }
1718
1719 // Determine how much space we need in the application's memory space
1720 // just for kernel and grant state. We need to make sure we allocate
1721 // enough memory just for that.
1722
1723 // Make room for grant pointers.
1724 let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
1725 let grant_ptrs_num = kernel.get_grant_count_and_finalize();
1726 let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
1727
1728 // Initial size of the kernel-owned part of process memory can be
1729 // calculated directly based on the initial size of all kernel-owned
1730 // data structures.
1731 //
1732 // We require our kernel memory break (located at the end of the
1733 // MPU-returned allocated memory region) to be word-aligned. However, we
1734 // don't have any explicit alignment constraints from the MPU. To ensure
1735 // that the below kernel-owned data structures still fit into the
1736 // kernel-owned memory even with padding for alignment, add an extra
1737 // `sizeof(usize)` bytes.
1738 let initial_kernel_memory_size = grant_ptrs_offset
1739 + Self::CALLBACKS_OFFSET
1740 + Self::PROCESS_STRUCT_OFFSET
1741 + core::mem::size_of::<usize>();
1742
1743 // By default we start with the initial size of process-accessible
1744 // memory set to 0. This maximizes the flexibility that processes have
1745 // to allocate their memory as they see fit. If a process needs more
1746 // accessible memory it must use the `brk` memop syscalls to request
1747 // more memory.
1748 //
1749 // We must take into account any process-accessible memory required by
1750 // the context switching implementation and allocate at least that much
1751 // memory so that we can successfully switch to the process. This is
1752 // architecture and implementation specific, so we query that now.
1753 let min_process_memory_size = chip
1754 .userspace_kernel_boundary()
1755 .initial_process_app_brk_size();
1756
1757 // We have to ensure that we at least ask the MPU for
1758 // `min_process_memory_size` so that we can be sure that `app_brk` is
1759 // not set inside the kernel-owned memory region. Now, in practice,
1760 // processes should not request 0 (or very few) bytes of memory in their
1761 // TBF header (i.e. `process_ram_requested_size` will almost always be
1762 // much larger than `min_process_memory_size`), as they are unlikely to
1763 // work with essentially no available memory. But, we still must protect
1764 // for that case.
1765 let min_process_ram_size = cmp::max(process_ram_requested_size, min_process_memory_size);
1766
1767 // Minimum memory size for the process.
1768 let min_total_memory_size = min_process_ram_size + initial_kernel_memory_size;
1769
1770 // Check if this process requires a fixed memory start address. If so,
1771 // try to adjust the memory region to work for this process.
1772 //
1773 // Right now, we only support skipping some RAM and leaving a chunk
1774 // unused so that the memory region starts where the process needs it
1775 // to.
1776 let remaining_memory = if let Some(fixed_memory_start) = pb
1777 .header
1778 .get_fixed_address_ram()
1779 .map(|addr: u32| remaining_memory.cast::<u8>().with_addr(addr as usize))
1780 {
1781 // The process does have a fixed address.
1782 if fixed_memory_start == remaining_memory.cast() {
1783 // Address already matches.
1784 remaining_memory
1785 } else if fixed_memory_start > remaining_memory.cast() {
1786 // Process wants a memory address farther in memory. Try to
1787 // advance the memory region to make the address match.
1788 let diff = fixed_memory_start.addr() - remaining_memory.addr();
1789 if diff > remaining_memory.len() {
1790 // We ran out of memory.
1791 let actual_address = (remaining_memory.cast::<u8>())
1792 .wrapping_byte_add(remaining_memory.len())
1793 .wrapping_byte_sub(1);
1794 let expected_address = fixed_memory_start;
1795 return Err((
1796 ProcessLoadError::MemoryAddressMismatch {
1797 actual_address,
1798 expected_address,
1799 },
1800 remaining_memory,
1801 ));
1802 } else {
1803 // Change the memory range to start where the process
1804 // requested it. Because of the if statement above we know this should
1805 // work. Doing it more cleanly would be good but was a bit beyond my borrow
1806 // ken; calling get_mut has a mutable borrow.-pal
1807 let (_, sliced) = raw_slice_split_at_mut(remaining_memory, diff);
1808 sliced
1809 }
1810 } else {
1811 // Address is earlier in memory, nothing we can do.
1812 let actual_address = remaining_memory.cast();
1813 let expected_address = fixed_memory_start;
1814 return Err((
1815 ProcessLoadError::MemoryAddressMismatch {
1816 actual_address,
1817 expected_address,
1818 },
1819 remaining_memory,
1820 ));
1821 }
1822 } else {
1823 remaining_memory
1824 };
1825
1826 // Determine where process memory will go and allocate an MPU region.
1827 //
1828 // `[allocation_start, allocation_size)` will cover both
1829 //
1830 // - the app-owned `min_process_memory_size`-long part of memory (at
1831 // some offset within `remaining_memory`), as well as
1832 //
1833 // - the kernel-owned allocation growing downward starting at the end
1834 // of this allocation, `initial_kernel_memory_size` bytes long.
1835 //
1836 let (allocation_start, allocation_size) = match chip.mpu().allocate_app_memory_region(
1837 remaining_memory.cast(),
1838 remaining_memory.len(),
1839 min_total_memory_size,
1840 min_process_memory_size,
1841 initial_kernel_memory_size,
1842 mpu::Permissions::ReadWriteOnly,
1843 &mut mpu_config,
1844 ) {
1845 Some((memory_start, memory_size)) => (memory_start, memory_size),
1846 None => {
1847 // Failed to load process. Insufficient memory.
1848 if config::CONFIG.debug_load_processes {
1849 debug!(
1850 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate memory region of size >= {:#X}",
1851 pb.flash.as_ptr() as usize,
1852 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1853 process_name,
1854 min_total_memory_size
1855 );
1856 }
1857 return Err((ProcessLoadError::NotEnoughMemory, remaining_memory));
1858 }
1859 };
1860
1861 // Determine the offset of the app-owned part of the above memory
1862 // allocation. An MPU may not place it at the very start of
1863 // `remaining_memory` for internal alignment constraints. This can only
1864 // overflow if the MPU implementation is incorrect; a compliant
1865 // implementation must return a memory allocation within the
1866 // `remaining_memory` slice.
1867 let app_memory_start_offset = allocation_start.addr() - remaining_memory.addr();
1868
1869 // Check if the memory region is valid for the process. If a process
1870 // included a fixed address for the start of RAM in its TBF header (this
1871 // field is optional, processes that are position independent do not
1872 // need a fixed address) then we check that we used the same address
1873 // when we allocated it in RAM.
1874 if let Some(fixed_memory_start) = pb
1875 .header
1876 .get_fixed_address_ram()
1877 .map(|addr: u32| remaining_memory.cast::<u8>().with_addr(addr as usize))
1878 {
1879 let actual_address = remaining_memory
1880 .cast::<u8>()
1881 .wrapping_byte_add(app_memory_start_offset);
1882 let expected_address = fixed_memory_start;
1883 if actual_address != expected_address {
1884 return Err((
1885 ProcessLoadError::MemoryAddressMismatch {
1886 actual_address,
1887 expected_address,
1888 },
1889 remaining_memory,
1890 ));
1891 }
1892 }
1893
1894 // With our MPU allocation, we can begin to divide up the
1895 // `remaining_memory` slice into individual regions for the process and
1896 // kernel, as follows:
1897 //
1898 //
1899 // +-----------------------------------------------------------------
1900 // | remaining_memory
1901 // +----------------------------------------------------+------------
1902 // v v
1903 // +----------------------------------------------------+
1904 // | allocated_padded_memory |
1905 // +--+-------------------------------------------------+
1906 // v v
1907 // +-------------------------------------------------+
1908 // | allocated_memory |
1909 // +-------------------------------------------------+
1910 // v v
1911 // +-----------------------+-------------------------+
1912 // | app_accessible_memory | allocated_kernel_memory |
1913 // +-----------------------+-------------------+-----+
1914 // v
1915 // kernel memory break
1916 // \---+/
1917 // v
1918 // optional padding
1919 //
1920 //
1921 // First split the `remaining_memory` into two slices:
1922 //
1923 // - `allocated_padded_memory`: the allocated memory region, containing
1924 //
1925 // 1. optional padding at the start of the memory region of
1926 // `app_memory_start_offset` bytes,
1927 //
1928 // 2. the app accessible memory region of `min_process_memory_size`,
1929 //
1930 // 3. optional unallocated memory, and
1931 //
1932 // 4. kernel-reserved memory, growing downward starting at
1933 // `app_memory_padding`.
1934 //
1935 // - `unused_memory`: the rest of the `remaining_memory`, not assigned
1936 // to this app.
1937 //
1938 let (allocated_padded_memory, unused_memory) =
1939 raw_slice_split_at_mut(remaining_memory, app_memory_start_offset + allocation_size);
1940
1941 // Now, slice off the (optional) padding at the start:
1942 let (_padding, allocated_memory) =
1943 raw_slice_split_at_mut(allocated_padded_memory, app_memory_start_offset);
1944
1945 // We continue to sub-slice the `allocated_memory` into
1946 // process-accessible and kernel-owned memory. Prior to that, store the
1947 // start and length ofthe overall allocation:
1948 let allocated_memory_start = allocated_memory.cast();
1949 let allocated_memory_len = allocated_memory.len();
1950
1951 // Slice off the process-accessible memory:
1952 let (app_accessible_memory, allocated_kernel_memory) =
1953 raw_slice_split_at_mut(allocated_memory, min_process_memory_size);
1954
1955 // Set the initial process-accessible memory:
1956 let initial_app_brk = app_accessible_memory
1957 .cast::<u8>()
1958 .add(app_accessible_memory.len());
1959
1960 // Set the initial allow high water mark to the start of process memory
1961 // since no `allow` calls have been made yet.
1962 let initial_allow_high_water_mark = app_accessible_memory.cast();
1963
1964 // Set up initial grant region.
1965 //
1966 // `kernel_memory_break` is set to the end of kernel-accessible memory
1967 // and grows downward.
1968 //
1969 // We require the `kernel_memory_break` to be aligned to a
1970 // word-boundary, as we rely on this during offset calculations to
1971 // kernel-accessed structs (e.g. the grant pointer table) below. As it
1972 // moves downward in the address space, we can't use the `align_offset`
1973 // convenience functions.
1974 //
1975 // Calling `wrapping_sub` is safe here, as we've factored in an optional
1976 // padding of at most `sizeof(usize)` bytes in the calculation of
1977 // `initial_kernel_memory_size` above.
1978 let mut kernel_memory_break = allocated_kernel_memory
1979 .cast::<u8>()
1980 .add(allocated_kernel_memory.len());
1981
1982 kernel_memory_break = kernel_memory_break
1983 .wrapping_sub(kernel_memory_break as usize % core::mem::size_of::<usize>());
1984
1985 // Now that we know we have the space we can setup the grant pointers.
1986 kernel_memory_break = kernel_memory_break.offset(-(grant_ptrs_offset as isize));
1987
1988 // This is safe, `kernel_memory_break` is aligned to a word-boundary,
1989 // and `grant_ptrs_offset` is a multiple of the word size.
1990 #[allow(clippy::cast_ptr_alignment)]
1991 // Set all grant pointers to null.
1992 let grant_pointers: &mut [MaybeUninit<GrantPointerEntry>] = slice::from_raw_parts_mut(
1993 kernel_memory_break as *mut MaybeUninit<GrantPointerEntry>,
1994 grant_ptrs_num,
1995 );
1996 for grant_entry in grant_pointers.iter_mut() {
1997 grant_entry.write(GrantPointerEntry {
1998 driver_num: 0,
1999 grant_ptr: core::ptr::null_mut(),
2000 });
2001 }
2002 // Safety: All values in this slice have been properly initialized.
2003 let grant_pointers = maybe_uninit_slice_assume_init_mut(grant_pointers);
2004
2005 // Now that we know we have the space we can setup the memory for the
2006 // upcalls.
2007 kernel_memory_break = kernel_memory_break.offset(-(Self::CALLBACKS_OFFSET as isize));
2008
2009 // This is safe today, as MPU constraints ensure that `memory_start`
2010 // will always be aligned on at least a word boundary, and that
2011 // memory_size will be aligned on at least a word boundary, and
2012 // `grant_ptrs_offset` is a multiple of the word size. Thus,
2013 // `kernel_memory_break` must be word aligned. While this is unlikely to
2014 // change, it should be more proactively enforced.
2015 //
2016 // TODO: https://github.com/tock/tock/issues/1739
2017 #[allow(clippy::cast_ptr_alignment)]
2018 // Set up ring buffer for upcalls to the process.
2019 let upcall_buf =
2020 slice::from_raw_parts_mut(kernel_memory_break as *mut Task, Self::CALLBACK_LEN);
2021 let tasks = RingBuffer::new(upcall_buf);
2022
2023 // Last thing in the kernel region of process RAM is the process struct.
2024 kernel_memory_break = kernel_memory_break.offset(-(Self::PROCESS_STRUCT_OFFSET as isize));
2025 let process_struct_memory_location = kernel_memory_break;
2026
2027 // Create the Process struct in the app grant region.
2028 // Note that this requires every field be explicitly initialized, as
2029 // we are just transforming a pointer into a structure.
2030 let process: &mut ProcessStandard<C, D> =
2031 &mut *(process_struct_memory_location as *mut ProcessStandard<'static, C, D>);
2032
2033 // Ask the kernel for a unique identifier for this process that is being
2034 // created.
2035 let unique_identifier = kernel.create_process_identifier();
2036
2037 // Save copies of these in case the app was compiled for fixed addresses
2038 // for later debugging.
2039 let fixed_address_flash = pb.header.get_fixed_address_flash();
2040 let fixed_address_ram = pb.header.get_fixed_address_ram();
2041
2042 process
2043 .process_id
2044 .set(ProcessId::new(kernel, unique_identifier, index));
2045 process.app_id = app_id;
2046 process.kernel = kernel;
2047 process.chip = chip;
2048 process.allow_high_water_mark = Cell::new(initial_allow_high_water_mark);
2049 process.memory_start = allocated_memory_start;
2050 process.memory_len = allocated_memory_len;
2051 process.header = pb.header;
2052 process.kernel_memory_break = Cell::new(kernel_memory_break);
2053 process.app_break = Cell::new(initial_app_brk);
2054 process.grant_pointers = MapCell::new(grant_pointers);
2055
2056 process.credential = pb.credential.get();
2057 process.footers = pb.footers;
2058 process.flash = pb.flash;
2059
2060 process.stored_state = MapCell::new(Default::default());
2061 // Mark this process as approved and leave it to the kernel to start it.
2062 process.state = Cell::new(State::Yielded);
2063 process.fault_policy = fault_policy;
2064 process.restart_count = Cell::new(0);
2065 process.completion_code = OptionalCell::empty();
2066
2067 process.mpu_config = MapCell::new(mpu_config);
2068 process.mpu_regions = [
2069 Cell::new(None),
2070 Cell::new(None),
2071 Cell::new(None),
2072 Cell::new(None),
2073 Cell::new(None),
2074 Cell::new(None),
2075 ];
2076 process.tasks = MapCell::new(tasks);
2077 process.is_yield_wait_for_ready = Cell::new(false);
2078
2079 process.debug = D::default();
2080 if let Some(fix_addr_flash) = fixed_address_flash {
2081 process.debug.set_fixed_address_flash(fix_addr_flash);
2082 }
2083 if let Some(fix_addr_ram) = fixed_address_ram {
2084 process.debug.set_fixed_address_ram(fix_addr_ram);
2085 }
2086
2087 // Handle any architecture-specific requirements for a new process.
2088 //
2089 // NOTE! We have to ensure that the start of process-accessible memory
2090 // (`app_memory_start`) is word-aligned. Since we currently start
2091 // process-accessible memory at the beginning of the allocated memory
2092 // region, we trust the MPU to give us a word-aligned starting address.
2093 //
2094 // TODO: https://github.com/tock/tock/issues/1739
2095 match process.stored_state.map(|stored_state| {
2096 chip.userspace_kernel_boundary().initialize_process(
2097 app_accessible_memory.cast(),
2098 initial_app_brk,
2099 stored_state,
2100 )
2101 }) {
2102 Some(Ok(())) => {}
2103 _ => {
2104 if config::CONFIG.debug_load_processes {
2105 debug!(
2106 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't initialize process",
2107 pb.flash.as_ptr() as usize,
2108 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
2109 process_name
2110 );
2111 }
2112 // Note that since remaining_memory was split by split_at_mut into
2113 // application memory and unused_memory, a failure here will leak
2114 // the application memory. Not leaking it requires being able to
2115 // reconstitute the original memory slice.
2116 return Err((ProcessLoadError::InternalError, unused_memory));
2117 }
2118 }
2119
2120 let flash_start = process.flash.as_ptr();
2121 let app_start =
2122 flash_start.wrapping_add(process.header.get_app_start_offset() as usize) as usize;
2123 let init_addr =
2124 flash_start.wrapping_add(process.header.get_init_function_offset() as usize) as usize;
2125 let fn_base = flash_start as usize;
2126 let fn_len = process.flash.len();
2127
2128 // We need to construct a capability with sufficient authority to cover all of a user's
2129 // code, with permissions to execute it. The entirety of flash is sufficient.
2130
2131 let init_fn = CapabilityPtr::new_with_authority(
2132 init_addr as *const (),
2133 fn_base,
2134 fn_len,
2135 CapabilityPtrPermissions::Execute,
2136 );
2137
2138 process.tasks.map(|tasks| {
2139 tasks.enqueue(Task::FunctionCall(FunctionCall {
2140 source: FunctionCallSource::Kernel,
2141 pc: init_fn,
2142 argument0: app_start,
2143 argument1: process.memory_start as usize,
2144 argument2: process.memory_len,
2145 argument3: (process.app_break.get() as usize).into(),
2146 }));
2147 });
2148
2149 // Set storage permissions. Put this at the end so that `process` is
2150 // completely formed before using it to determine the storage
2151 // permissions.
2152 process.storage_permissions = storage_permissions_policy.get_permissions(process);
2153
2154 // Return the process object and a remaining memory for processes slice.
2155 Ok((Some(process), unused_memory))
2156 }
2157
2158 /// Reset the process, resetting all of its state and re-initializing it so
2159 /// it can start running. Assumes the process is not running but is still in
2160 /// flash and still has its memory region allocated to it.
2161 fn reset(&self) -> Result<(), ErrorCode> {
2162 // We need a new process identifier for this process since the restarted
2163 // version is in effect a new process. This is also necessary to
2164 // invalidate any stored `ProcessId`s that point to the old version of
2165 // the process. However, the process has not moved locations in the
2166 // processes array, so we copy the existing index.
2167 let old_index = self.process_id.get().index;
2168 let new_identifier = self.kernel.create_process_identifier();
2169 self.process_id
2170 .set(ProcessId::new(self.kernel, new_identifier, old_index));
2171
2172 // Reset debug information that is per-execution and not per-process.
2173 self.debug.reset_last_syscall();
2174 self.debug.reset_syscall_count();
2175 self.debug.reset_dropped_upcall_count();
2176 self.debug.reset_timeslice_expiration_count();
2177
2178 // Reset MPU region configuration.
2179 //
2180 // TODO: ideally, this would be moved into a helper function used by
2181 // both create() and reset(), but process load debugging complicates
2182 // this. We just want to create new config with only flash and memory
2183 // regions.
2184 //
2185 // We must have a previous MPU configuration stored, fault the
2186 // process if this invariant is violated. We avoid allocating
2187 // a new MPU configuration, as this may eventually exhaust the
2188 // number of available MPU configurations.
2189 let mut mpu_config = self.mpu_config.take().ok_or(ErrorCode::FAIL)?;
2190 self.chip.mpu().reset_config(&mut mpu_config);
2191
2192 // Allocate MPU region for flash.
2193 let app_mpu_flash = self.chip.mpu().allocate_region(
2194 self.flash.as_ptr(),
2195 self.flash.len(),
2196 self.flash.len(),
2197 mpu::Permissions::ReadExecuteOnly,
2198 &mut mpu_config,
2199 );
2200 if app_mpu_flash.is_none() {
2201 // We were unable to allocate an MPU region for flash. This is very
2202 // unexpected since we previously ran this process. However, we
2203 // return now and leave the process faulted and it will not be
2204 // scheduled.
2205 return Err(ErrorCode::FAIL);
2206 }
2207
2208 // RAM
2209
2210 // Re-determine the minimum amount of RAM the kernel must allocate to
2211 // the process based on the specific requirements of the syscall
2212 // implementation.
2213 let min_process_memory_size = self
2214 .chip
2215 .userspace_kernel_boundary()
2216 .initial_process_app_brk_size();
2217
2218 // Recalculate initial_kernel_memory_size as was done in create()
2219 let grant_ptr_size = mem::size_of::<(usize, *mut u8)>();
2220 let grant_ptrs_num = self.kernel.get_grant_count_and_finalize();
2221 let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
2222
2223 let initial_kernel_memory_size =
2224 grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET;
2225
2226 let app_mpu_mem = self.chip.mpu().allocate_app_memory_region(
2227 self.mem_start(),
2228 self.memory_len,
2229 self.memory_len, //we want exactly as much as we had before restart
2230 min_process_memory_size,
2231 initial_kernel_memory_size,
2232 mpu::Permissions::ReadWriteOnly,
2233 &mut mpu_config,
2234 );
2235 let (app_mpu_mem_start, app_mpu_mem_len) = match app_mpu_mem {
2236 Some((start, len)) => (start, len),
2237 None => {
2238 // We couldn't configure the MPU for the process. This shouldn't
2239 // happen since we were able to start the process before, but at
2240 // this point it is better to leave the app faulted and not
2241 // schedule it.
2242 return Err(ErrorCode::NOMEM);
2243 }
2244 };
2245
2246 // Reset memory pointers now that we know the layout of the process
2247 // memory and know that we can configure the MPU.
2248
2249 // app_brk is set based on minimum syscall size above the start of
2250 // memory.
2251 let app_brk = app_mpu_mem_start.wrapping_add(min_process_memory_size);
2252 self.app_break.set(app_brk);
2253 // kernel_brk is calculated backwards from the end of memory the size of
2254 // the initial kernel data structures.
2255 let kernel_brk = app_mpu_mem_start
2256 .wrapping_add(app_mpu_mem_len)
2257 .wrapping_sub(initial_kernel_memory_size);
2258 self.kernel_memory_break.set(kernel_brk);
2259 // High water mark for `allow`ed memory is reset to the start of the
2260 // process's memory region.
2261 self.allow_high_water_mark.set(app_mpu_mem_start);
2262
2263 // Store the adjusted MPU configuration:
2264 self.mpu_config.replace(mpu_config);
2265
2266 // Handle any architecture-specific requirements for a process when it
2267 // first starts (as it would when it is new).
2268 let ukb_init_process = self.stored_state.map_or(Err(()), |stored_state| unsafe {
2269 self.chip.userspace_kernel_boundary().initialize_process(
2270 app_mpu_mem_start,
2271 app_brk,
2272 stored_state,
2273 )
2274 });
2275 match ukb_init_process {
2276 Ok(()) => {}
2277 Err(()) => {
2278 // We couldn't initialize the architecture-specific state for
2279 // this process. This shouldn't happen since the app was able to
2280 // be started before, but at this point the app is no longer
2281 // valid. The best thing we can do now is leave the app as still
2282 // faulted and not schedule it.
2283 return Err(ErrorCode::RESERVE);
2284 }
2285 }
2286
2287 self.restart_count.increment();
2288
2289 // Mark the state as `Yielded` for the scheduler.
2290 self.state.set(State::Yielded);
2291
2292 // And queue up this app to be restarted.
2293 let flash_start = self.flash_start();
2294 let app_start =
2295 flash_start.wrapping_add(self.header.get_app_start_offset() as usize) as usize;
2296 let init_addr =
2297 flash_start.wrapping_add(self.header.get_init_function_offset() as usize) as usize;
2298
2299 // We need to construct a capability with sufficient authority to cover all of a user's
2300 // code, with permissions to execute it. The entirety of flash is sufficient.
2301
2302 let init_fn = unsafe {
2303 CapabilityPtr::new_with_authority(
2304 init_addr as *const (),
2305 flash_start as usize,
2306 (self.flash_end() as usize) - (flash_start as usize),
2307 CapabilityPtrPermissions::Execute,
2308 )
2309 };
2310
2311 self.enqueue_task(Task::FunctionCall(FunctionCall {
2312 source: FunctionCallSource::Kernel,
2313 pc: init_fn,
2314 argument0: app_start,
2315 argument1: self.memory_start as usize,
2316 argument2: self.memory_len,
2317 argument3: (self.app_break.get() as usize).into(),
2318 }))
2319 }
2320
2321 /// Checks if the buffer represented by the passed in base pointer and size
2322 /// is within the RAM bounds currently exposed to the processes (i.e. ending
2323 /// at `app_break`). If this method returns `true`, the buffer is guaranteed
2324 /// to be accessible to the process and to not overlap with the grant
2325 /// region.
2326 fn in_app_owned_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2327 // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2328 // could skip this check.
2329 // CapabilityPtr needs to make it slightly further, and we need to add
2330 // interfaces that tell us how much assurance it gives on the current
2331 // platform.
2332 let buf_end_addr = buf_start_addr.wrapping_add(size);
2333
2334 buf_end_addr >= buf_start_addr
2335 && buf_start_addr >= self.mem_start()
2336 && buf_end_addr <= self.app_break.get()
2337 }
2338
2339 /// Checks if the buffer represented by the passed in base pointer and size
2340 /// are within the readable region of an application's flash memory. If
2341 /// this method returns true, the buffer is guaranteed to be readable to the
2342 /// process.
2343 fn in_app_flash_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2344 // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2345 // could skip this check.
2346 // CapabilityPtr needs to make it slightly further, and we need to add
2347 // interfaces that tell us how much assurance it gives on the current
2348 // platform.
2349 let buf_end_addr = buf_start_addr.wrapping_add(size);
2350
2351 buf_end_addr >= buf_start_addr
2352 && buf_start_addr >= self.flash_non_protected_start()
2353 && buf_end_addr <= self.flash_end()
2354 }
2355
2356 /// Reset all `grant_ptr`s to NULL.
2357 unsafe fn grant_ptrs_reset(&self) {
2358 self.grant_pointers.map(|grant_pointers| {
2359 for grant_entry in grant_pointers.iter_mut() {
2360 grant_entry.driver_num = 0;
2361 grant_entry.grant_ptr = ptr::null_mut();
2362 }
2363 });
2364 }
2365
    /// Allocate memory in a process's grant region.
    ///
    /// Ensures that the allocation is of `size` bytes and aligned to `align`
    /// bytes. The grant region grows downward: the allocation is carved off
    /// the bottom of the kernel-owned region by lowering
    /// `kernel_memory_break`.
    ///
    /// If there is not enough memory, or the MPU cannot isolate the process
    /// accessible region from the new kernel memory break after doing the
    /// allocation, then this will return `None`. `None` is also returned if
    /// the MPU configuration is currently unavailable (`mpu_config` empty).
    ///
    /// NOTE(review): the alignment mask computation assumes `align` is a
    /// power of two (after being clamped to >= 2) — confirm at call sites.
    fn allocate_in_grant_region_internal(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
        self.mpu_config.and_then(|config| {
            // First, compute the candidate new pointer. Note that at this point
            // we have not yet checked whether there is space for this
            // allocation or that it meets alignment requirements.
            let new_break_unaligned = self.kernel_memory_break.get().wrapping_sub(size);

            // Our minimum alignment requirement is two bytes, so that the
            // lowest bit of the address will always be zero and we can use it
            // as a flag. It doesn't hurt to increase the alignment (except for
            // potentially a wasted byte) so we make sure `align` is at least
            // two.
            let align = cmp::max(align, 2);

            // The alignment must be a power of two, 2^a. The expression
            // `!(align - 1)` then returns a mask with leading ones, followed by
            // `a` trailing zeros. Masking rounds the candidate pointer DOWN,
            // which is safe because the region grows downward.
            let alignment_mask = !(align - 1);
            let new_break = (new_break_unaligned as usize & alignment_mask) as *const u8;

            // Verify there is space for this allocation (the new break must
            // not dip below the process-accessible region).
            if new_break < self.app_break.get() {
                None
            // Verify it didn't wrap around (a new break above the old one
            // means the subtraction underflowed).
            } else if new_break > self.kernel_memory_break.get() {
                None
            // Verify this is compatible with the MPU.
            } else if let Err(()) = self.chip.mpu().update_app_memory_region(
                self.app_break.get(),
                new_break,
                mpu::Permissions::ReadWriteOnly,
                config,
            ) {
                None
            } else {
                // Allocation is valid.

                // We always allocate down, so we must lower the
                // kernel_memory_break.
                self.kernel_memory_break.set(new_break);

                // We need `grant_ptr` as a mutable pointer.
                let grant_ptr = new_break as *mut u8;

                // ### Safety
                //
                // Here we are guaranteeing that `grant_ptr` is not null. We can
                // ensure this because we just created `grant_ptr` based on the
                // process's allocated memory, and we know it cannot be null.
                unsafe { Some(NonNull::new_unchecked(grant_ptr)) }
            }
        })
    }
2427
2428 /// Create the identifier for a custom grant that grant.rs uses to access
2429 /// the custom grant.
2430 ///
2431 /// We create this identifier by calculating the number of bytes between
2432 /// where the custom grant starts and the end of the process memory.
2433 fn create_custom_grant_identifier(&self, ptr: NonNull<u8>) -> ProcessCustomGrantIdentifier {
2434 let custom_grant_address = ptr.as_ptr() as usize;
2435 let process_memory_end = self.mem_end() as usize;
2436
2437 ProcessCustomGrantIdentifier {
2438 offset: process_memory_end - custom_grant_address,
2439 }
2440 }
2441
2442 /// Use a `ProcessCustomGrantIdentifier` to find the address of the
2443 /// custom grant.
2444 ///
2445 /// This reverses `create_custom_grant_identifier()`.
2446 fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifier) -> usize {
2447 let process_memory_end = self.mem_end() as usize;
2448
2449 // Subtract the offset in the identifier from the end of the process
2450 // memory to get the address of the custom grant.
2451 process_memory_end - identifier.offset
2452 }
2453
2454 /// Return the app's read and modify storage permissions from the TBF header
2455 /// if it exists.
2456 ///
2457 /// If the header does not exist then return `None`. If the header does
2458 /// exist, this returns a 5-tuple with:
2459 ///
2460 /// - `write_allowed`: bool. If this process should have write permissions.
2461 /// - `read_count`: usize. How many read IDs are valid.
2462 /// - `read_ids`: [u32]. The read IDs.
2463 /// - `modify_count`: usze. How many modify IDs are valid.
2464 /// - `modify_ids`: [u32]. The modify IDs.
2465 pub fn get_tbf_storage_permissions(&self) -> Option<(bool, usize, [u32; 8], usize, [u32; 8])> {
2466 let read_perms = self.header.get_storage_read_ids();
2467 let modify_perms = self.header.get_storage_modify_ids();
2468
2469 match (read_perms, modify_perms) {
2470 (Some((read_count, read_ids)), Some((modify_count, modify_ids))) => Some((
2471 self.header.get_storage_write_id().is_some(),
2472 read_count,
2473 read_ids,
2474 modify_count,
2475 modify_ids,
2476 )),
2477 _ => None,
2478 }
2479 }
2480
    /// The start address of allocated RAM for this process.
    ///
    /// This is the lowest address of the process's memory region; the
    /// kernel-owned portion (grants, upcalls, process struct) sits at the
    /// high end of the same region.
    fn mem_start(&self) -> *const u8 {
        self.memory_start
    }
2485
2486 /// The first address after the end of the allocated RAM for this process.
2487 fn mem_end(&self) -> *const u8 {
2488 self.memory_start.wrapping_add(self.memory_len)
2489 }
2490
    /// The start address of the flash region allocated for this process.
    ///
    /// Note this points at the beginning of the TBF header, which is part of
    /// the kernel-protected range (see `flash_non_protected_start()`).
    fn flash_start(&self) -> *const u8 {
        self.flash.as_ptr()
    }
2495
2496 /// Get the first address of process's flash that isn't protected by the
2497 /// kernel. The protected range of flash contains the TBF header and
2498 /// potentially other state the kernel is storing on behalf of the process,
2499 /// and cannot be edited by the process.
2500 fn flash_non_protected_start(&self) -> *const u8 {
2501 ((self.flash.as_ptr() as usize) + self.header.get_protected_size() as usize) as *const u8
2502 }
2503
2504 /// The first address after the end of the flash region allocated for this
2505 /// process.
2506 fn flash_end(&self) -> *const u8 {
2507 self.flash.as_ptr().wrapping_add(self.flash.len())
2508 }
2509
    /// The lowest address of the grant region for the process.
    ///
    /// The kernel-owned region grows downward from the end of process memory;
    /// this break moves down as grants are allocated.
    fn kernel_memory_break(&self) -> *const u8 {
        self.kernel_memory_break.get()
    }
2514
    /// Return the highest address the process has access to, or the current
    /// process memory brk.
    ///
    /// Process-accessible RAM spans `[mem_start(), app_memory_break())`;
    /// addresses at or above this are kernel-owned.
    fn app_memory_break(&self) -> *const u8 {
        self.app_break.get()
    }
2520}