// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Tock default Process implementation.
//!
//! `ProcessStandard` is an implementation for a userspace process running on
//! the Tock kernel.

use core::cell::Cell;
use core::cmp;
use core::fmt::Write;
use core::num::NonZeroU32;
use core::ptr::NonNull;
use core::{mem, ptr, slice, str};

use crate::collections::queue::Queue;
use crate::collections::ring_buffer::RingBuffer;
use crate::config;
use crate::debug;
use crate::errorcode::ErrorCode;
use crate::kernel::Kernel;
use crate::platform::chip::Chip;
use crate::platform::mpu::{self, MPU};
use crate::process::BinaryVersion;
use crate::process::ProcessBinary;
use crate::process::{Error, FunctionCall, FunctionCallSource, Process, Task};
use crate::process::{FaultAction, ProcessCustomGrantIdentifier, ProcessId};
use crate::process::{ProcessAddresses, ProcessSizes, ShortId};
use crate::process::{State, StoppedState};
use crate::process_checker::AcceptedCredential;
use crate::process_loading::ProcessLoadError;
use crate::process_policies::ProcessFaultPolicy;
use crate::process_policies::ProcessStandardStoragePermissionsPolicy;
use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer};
use crate::storage_permissions::StoragePermissions;
use crate::syscall::{self, Syscall, SyscallReturn, UserspaceKernelBoundary};
use crate::upcall::UpcallId;
use crate::utilities::capability_ptr::{CapabilityPtr, CapabilityPtrPermissions};
use crate::utilities::cells::{MapCell, NumericCellExt, OptionalCell};

use tock_tbf::types::CommandPermissions;

/// Interface supported by [`ProcessStandard`] for recording debug information.
///
/// This trait provides flexibility to users of [`ProcessStandard`] to determine
/// how debugging information should be recorded, or if debugging information
/// should be recorded at all.
///
/// Platforms that want to only maintain certain debugging information can
/// implement only part of this trait.
///
/// Tock provides a default implementation of this trait on the `()` type.
/// Kernels that wish to use [`ProcessStandard`] but do not need process-level
/// debugging information can use `()` as the `ProcessStandardDebug` type.
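///
/// For example, a board can select the debugging behavior when naming its
/// process type (a minimal sketch; `MyChip` is a placeholder for the board's
/// actual chip type):
///
/// ```rust,ignore
/// // Keep no debug records: every setter is a no-op and every getter
/// // returns `None` or 0.
/// type MyProcess = ProcessStandard<'static, MyChip, ()>;
///
/// // Keep full debug records, at the cost of extra RAM per process.
/// type MyVerboseProcess = ProcessStandard<'static, MyChip, ProcessStandardDebugFull>;
/// ```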
pub trait ProcessStandardDebug: Default {
    /// Record the address in flash the process expects to start at.
    fn set_fixed_address_flash(&self, address: u32);
    /// Get the address in flash the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_flash(&self) -> Option<u32>;
    /// Record the address in RAM the process expects to start at.
    fn set_fixed_address_ram(&self, address: u32);
    /// Get the address in RAM the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_ram(&self) -> Option<u32>;
    /// Record the address where the process placed its heap.
    fn set_app_heap_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its heap, if it was recorded.
    fn get_app_heap_start_pointer(&self) -> Option<*const u8>;
    /// Record the address where the process placed its stack.
    fn set_app_stack_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its stack, if it was recorded.
    fn get_app_stack_start_pointer(&self) -> Option<*const u8>;
    /// Update the lowest address that the process's stack has reached.
    fn set_app_stack_min_pointer(&self, ptr: *const u8);
    /// Get the lowest address of the process's stack, if it was recorded.
    fn get_app_stack_min_pointer(&self) -> Option<*const u8>;
    /// Provide the current address of the bottom of the stack and record the
    /// address if it is the lowest address that the process's stack has
    /// reached.
    fn set_new_app_stack_min_pointer(&self, ptr: *const u8);

    /// Record the most recent system call the process called.
    fn set_last_syscall(&self, syscall: Syscall);
    /// Get the most recent system call the process called, if it was recorded.
    fn get_last_syscall(&self) -> Option<Syscall>;
    /// Clear any record of the most recent system call the process called.
    fn reset_last_syscall(&self);

    /// Increase the recorded count of the number of system calls the process
    /// has called.
    fn increment_syscall_count(&self);
    /// Get the recorded count of the number of system calls the process has
    /// called.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_syscall_count()`] is never called.
    fn get_syscall_count(&self) -> usize;
    /// Reset the recorded count of the number of system calls called by the
    /// process to 0.
    fn reset_syscall_count(&self);

    /// Increase the recorded count of the number of upcalls that have been
    /// dropped for the process.
    fn increment_dropped_upcall_count(&self);
    /// Get the recorded count of the number of upcalls that have been dropped
    /// for the process.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_dropped_upcall_count()`] is never
    /// called.
    fn get_dropped_upcall_count(&self) -> usize;
    /// Reset the recorded count of the number of upcalls that have been
    /// dropped for the process to 0.
    fn reset_dropped_upcall_count(&self);

    /// Increase the recorded count of the number of times the process has
    /// exceeded its timeslice.
    fn increment_timeslice_expiration_count(&self);
    /// Get the recorded count of the number of times the process has exceeded
    /// its timeslice.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_timeslice_expiration_count()`] is
    /// never called.
    fn get_timeslice_expiration_count(&self) -> usize;
    /// Reset the recorded count of the number of times the process has
    /// exceeded its timeslice to 0.
    fn reset_timeslice_expiration_count(&self);
}

/// A debugging implementation for [`ProcessStandard`] that records the full
/// debugging state.
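///
/// A short usage sketch (the record is interior-mutable, so only a shared
/// reference is needed):
///
/// ```rust,ignore
/// let debug = ProcessStandardDebugFull::default();
/// debug.increment_syscall_count();
/// assert_eq!(debug.get_syscall_count(), 1);
/// debug.reset_syscall_count();
/// assert_eq!(debug.get_syscall_count(), 0);
/// ```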
pub struct ProcessStandardDebugFull {
    /// Inner field for the debug state that is in a [`MapCell`] to provide
    /// mutable access.
    debug: MapCell<ProcessStandardDebugFullInner>,
}

/// Struct for debugging [`ProcessStandard`] processes that records the full set
/// of debugging information.
///
/// These pointers and counters are not strictly required for kernel operation,
/// but provide helpful information when an app crashes.
#[derive(Default)]
struct ProcessStandardDebugFullInner {
    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in flash. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_flash: Option<u32>,

    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in RAM. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_ram: Option<u32>,

    /// Where the process has started its heap in RAM.
    app_heap_start_pointer: Option<*const u8>,

    /// Where the start of the stack is for the process. If the kernel does the
    /// PIC setup for this app then we know this, otherwise we need the app to
    /// tell us where it put its stack.
    app_stack_start_pointer: Option<*const u8>,

    /// How low have we ever seen the stack pointer.
    app_stack_min_pointer: Option<*const u8>,

    /// How many syscalls have occurred since the process started.
    syscall_count: usize,

    /// What was the most recent syscall.
    last_syscall: Option<Syscall>,

    /// How many upcalls were dropped because the queue was insufficiently
    /// long.
    dropped_upcall_count: usize,

    /// How many times this process has been paused because it exceeded its
    /// timeslice.
    timeslice_expiration_count: usize,
}

impl ProcessStandardDebug for ProcessStandardDebugFull {
    fn set_fixed_address_flash(&self, address: u32) {
        self.debug.map(|d| d.fixed_address_flash = Some(address));
    }
    fn get_fixed_address_flash(&self) -> Option<u32> {
        self.debug.map_or(None, |d| d.fixed_address_flash)
    }
    fn set_fixed_address_ram(&self, address: u32) {
        self.debug.map(|d| d.fixed_address_ram = Some(address));
    }
    fn get_fixed_address_ram(&self) -> Option<u32> {
        self.debug.map_or(None, |d| d.fixed_address_ram)
    }
    fn set_app_heap_start_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_heap_start_pointer = Some(ptr));
    }
    fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_heap_start_pointer)
    }
    fn set_app_stack_start_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_stack_start_pointer = Some(ptr));
    }
    fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_stack_start_pointer)
    }
    fn set_app_stack_min_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_stack_min_pointer = Some(ptr));
    }
    fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_stack_min_pointer)
    }
    fn set_new_app_stack_min_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| {
            match d.app_stack_min_pointer {
                None => d.app_stack_min_pointer = Some(ptr),
                Some(asmp) => {
                    // Update max stack depth if needed.
                    if ptr < asmp {
                        d.app_stack_min_pointer = Some(ptr);
                    }
                }
            }
        });
    }

    fn set_last_syscall(&self, syscall: Syscall) {
        self.debug.map(|d| d.last_syscall = Some(syscall));
    }
    fn get_last_syscall(&self) -> Option<Syscall> {
        self.debug.map_or(None, |d| d.last_syscall)
    }
    fn reset_last_syscall(&self) {
        self.debug.map(|d| d.last_syscall = None);
    }

    fn increment_syscall_count(&self) {
        self.debug.map(|d| d.syscall_count += 1);
    }
    fn get_syscall_count(&self) -> usize {
        self.debug.map_or(0, |d| d.syscall_count)
    }
    fn reset_syscall_count(&self) {
        self.debug.map(|d| d.syscall_count = 0);
    }

    fn increment_dropped_upcall_count(&self) {
        self.debug.map(|d| d.dropped_upcall_count += 1);
    }
    fn get_dropped_upcall_count(&self) -> usize {
        self.debug.map_or(0, |d| d.dropped_upcall_count)
    }
    fn reset_dropped_upcall_count(&self) {
        self.debug.map(|d| d.dropped_upcall_count = 0);
    }

    fn increment_timeslice_expiration_count(&self) {
        self.debug.map(|d| d.timeslice_expiration_count += 1);
    }
    fn get_timeslice_expiration_count(&self) -> usize {
        self.debug.map_or(0, |d| d.timeslice_expiration_count)
    }
    fn reset_timeslice_expiration_count(&self) {
        self.debug.map(|d| d.timeslice_expiration_count = 0);
    }
}

impl Default for ProcessStandardDebugFull {
    fn default() -> Self {
        Self {
            debug: MapCell::new(ProcessStandardDebugFullInner::default()),
        }
    }
}

impl ProcessStandardDebug for () {
    fn set_fixed_address_flash(&self, _address: u32) {}
    fn get_fixed_address_flash(&self) -> Option<u32> {
        None
    }
    fn set_fixed_address_ram(&self, _address: u32) {}
    fn get_fixed_address_ram(&self) -> Option<u32> {
        None
    }
    fn set_app_heap_start_pointer(&self, _ptr: *const u8) {}
    fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_app_stack_start_pointer(&self, _ptr: *const u8) {}
    fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_app_stack_min_pointer(&self, _ptr: *const u8) {}
    fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_new_app_stack_min_pointer(&self, _ptr: *const u8) {}

    fn set_last_syscall(&self, _syscall: Syscall) {}
    fn get_last_syscall(&self) -> Option<Syscall> {
        None
    }
    fn reset_last_syscall(&self) {}

    fn increment_syscall_count(&self) {}
    fn get_syscall_count(&self) -> usize {
        0
    }
    fn reset_syscall_count(&self) {}
    fn increment_dropped_upcall_count(&self) {}
    fn get_dropped_upcall_count(&self) -> usize {
        0
    }
    fn reset_dropped_upcall_count(&self) {}
    fn increment_timeslice_expiration_count(&self) {}
    fn get_timeslice_expiration_count(&self) -> usize {
        0
    }
    fn reset_timeslice_expiration_count(&self) {}
}

/// Entry that is stored in the grant pointer table at the top of process
/// memory.
///
/// One copy of this entry struct is stored per grant region defined in the
/// kernel. This type allows the core kernel to look up a grant based on the
/// driver_num associated with the grant, and it also holds the pointer to the
/// memory allocated for the particular grant.
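///
/// For example, because 0 is a valid driver number, a lookup by `driver_num`
/// must also check that the grant is allocated (a sketch of the scan the
/// kernel performs; see `lookup_grant_from_driver_num` below):
///
/// ```rust,ignore
/// fn find(entries: &[GrantPointerEntry], driver_num: usize) -> Option<usize> {
///     entries
///         .iter()
///         .position(|e| !e.grant_ptr.is_null() && e.driver_num == driver_num)
/// }
/// ```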
#[repr(C)]
struct GrantPointerEntry {
    /// The syscall driver number associated with the allocated grant.
    ///
    /// This defaults to 0 if the grant has not been allocated. Note, however,
    /// that 0 is a valid driver_num, and therefore cannot be used to check if a
    /// grant is allocated or not.
    driver_num: usize,

    /// The start of the memory location where the grant has been allocated, or
    /// null if the grant has not been allocated.
    grant_ptr: *mut u8,
}

/// A type for userspace processes in Tock.
///
/// As its name implies, this is the standard implementation for Tock processes
/// that exposes the full support for processes running on embedded hardware.
///
/// [`ProcessStandard`] is templated on two parameters:
///
/// - `C`: [`Chip`]: The implementation must know the [`Chip`] the kernel is
///   running on to properly store architecture-specific and MPU state for the
///   process.
/// - `D`: [`ProcessStandardDebug`]: This configures the debugging mechanism the
///   process uses for storing optional debugging data. Kernels that do not wish
///   to store per-process debugging state can use the `()` type for this
///   parameter.
pub struct ProcessStandard<'a, C: 'static + Chip, D: 'static + ProcessStandardDebug + Default> {
    /// Identifier of this process and the index of the process in the process
    /// table.
    process_id: Cell<ProcessId>,

    /// An application ShortId, generated from process loading and
    /// checking, which denotes the security identity of this process.
    app_id: ShortId,

    /// Pointer to the main Kernel struct.
    kernel: &'static Kernel,

    /// Pointer to the struct that defines the actual chip the kernel is running
    /// on. This is used because processes have subtle hardware-based
    /// differences. Specifically, the actual syscall interface and how
    /// processes are switched to is architecture-specific, and how memory must
    /// be allocated for memory protection units is also hardware-specific.
    chip: &'static C,

    /// Application memory layout:
    ///
    /// ```text
    ///     ╒════════ ← memory_start + memory_len
    ///  ╔═ │ Grant Pointers
    ///  ║  │ ──────
    ///     │ Process Control Block
    ///  D  │ ──────
    ///  Y  │ Grant Regions
    ///  N  │
    ///  A  │   ↓
    ///  M  │ ──────  ← kernel_memory_break
    ///  I  │
    ///  C  │ ──────  ← app_break               ═╗
    ///     │                                    ║
    ///  ║  │   ↑                                  A
    ///  ║  │  Heap                              P C
    ///  ╠═ │ ──────  ← app_heap_start           R C
    ///     │  Data                              O E
    ///  F  │ ──────  ← data_start_pointer       C S
    ///  I  │ Stack                              E S
    ///  X  │   ↓                                S I
    ///  E  │                                    S B
    ///  D  │ ──────  ← current_stack_pointer      L
    ///     │                                    ║ E
    ///  ╚═ ╘════════ ← memory_start            ═╝
    /// ```
    ///
    /// The start of process memory. We store this as a pointer and length and
    /// not a slice due to Rust aliasing rules. If we were to store a slice,
    /// then any other slice or `ProcessBuffer` referencing the same memory
    /// elsewhere in the kernel would be undefined behavior.
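    ///
    /// For example, when a slice is genuinely needed, one can be constructed
    /// transiently from these two fields (a sketch; the caller must uphold the
    /// aliasing rules for the duration of the borrow):
    ///
    /// ```rust,ignore
    /// let mem: &[u8] = unsafe { slice::from_raw_parts(self.memory_start, self.memory_len) };
    /// ```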
    memory_start: *const u8,
    /// Number of bytes of memory allocated to this process.
    memory_len: usize,

    /// Reference to the slice of `GrantPointerEntry`s stored in the process's
    /// memory reserved for the kernel. These driver numbers are zero and
    /// pointers are null if the grant region has not been allocated. When the
    /// grant region is allocated these pointers are updated to point to the
    /// allocated memory and the driver number is set to match the driver that
    /// owns the grant. No other reference to these pointers exists in the Tock
    /// kernel.
    grant_pointers: MapCell<&'static mut [GrantPointerEntry]>,

    /// Pointer to the end of the allocated (and MPU protected) grant region.
    kernel_memory_break: Cell<*const u8>,

    /// Pointer to the end of process RAM that has been sbrk'd to the process.
    app_break: Cell<*const u8>,

    /// Pointer to the high water mark for process buffers shared through
    /// `allow`.
    allow_high_water_mark: Cell<*const u8>,

    /// Process flash segment. This is the region of nonvolatile flash that
    /// the process occupies.
    flash: &'static [u8],

    /// The footers of the process binary (may be zero-sized), which are metadata
    /// about the process not covered by integrity. Used, among other things, to
    /// store signatures.
    footers: &'static [u8],

    /// Collection of pointers to the TBF header in flash.
    header: tock_tbf::types::TbfHeader<'static>,

    /// Credential that was approved for this process, or `None` if the
    /// process was permitted to run without an accepted credential.
    credential: Option<AcceptedCredential>,

    /// State saved on behalf of the process each time the app switches to the
    /// kernel.
    stored_state:
        MapCell<<<C as Chip>::UserspaceKernelBoundary as UserspaceKernelBoundary>::StoredState>,

    /// The current state of the app. The scheduler uses this to determine
    /// whether it can schedule this app to execute.
    ///
    /// The `state` is used both for bookkeeping for the scheduler as well as
    /// for enabling control by other parts of the system. The scheduler keeps
    /// track of if a process is ready to run or not by switching between the
    /// `Running` and `Yielded` states. The system can control the process by
    /// switching it to a "stopped" state to prevent the scheduler from
    /// scheduling it.
    state: Cell<State>,

    /// How to respond if this process faults.
    fault_policy: &'a dyn ProcessFaultPolicy,

    /// Storage permissions for this process.
    storage_permissions: StoragePermissions,

    /// Configuration data for the MPU.
    mpu_config: MapCell<<<C as Chip>::MPU as MPU>::MpuConfig>,

    /// MPU regions are saved as a pointer-size pair.
    mpu_regions: [Cell<Option<mpu::Region>>; 6],

    /// Essentially a list of upcalls that want to call functions in the
    /// process.
    tasks: MapCell<RingBuffer<'a, Task>>,

    /// Count of how many times this process has entered the fault condition and
    /// been restarted. This is used by some `ProcessRestartPolicy`s to
    /// determine if the process should be restarted or not.
    restart_count: Cell<usize>,

    /// The completion code set by the process when it last exited, restarted,
    /// or was terminated. If the process has never terminated, then the
    /// `OptionalCell` will be empty (i.e. `None`). If the process has exited,
    /// restarted, or terminated, the `OptionalCell` will contain an optional 32
    /// bit value. The option will be `None` if the process crashed or was
    /// stopped by the kernel and there is no provided completion code. If the
    /// process called the exit syscall then the provided completion code will
    /// be stored as `Some(completion code)`.
    completion_code: OptionalCell<Option<u32>>,

    /// Values kept so that we can print useful debug messages when apps fault.
    debug: D,
}

impl<C: Chip, D: 'static + ProcessStandardDebug> Process for ProcessStandard<'_, C, D> {
    fn processid(&self) -> ProcessId {
        self.process_id.get()
    }

    fn short_app_id(&self) -> ShortId {
        self.app_id
    }

    fn binary_version(&self) -> Option<BinaryVersion> {
        let version = self.header.get_binary_version();
        match NonZeroU32::new(version) {
            Some(version_nonzero) => Some(BinaryVersion::new(version_nonzero)),
            None => None,
        }
    }

    fn get_credential(&self) -> Option<AcceptedCredential> {
        self.credential
    }

    fn enqueue_task(&self, task: Task) -> Result<(), ErrorCode> {
        // If this app is not running (e.g., it is in a `Faulted` state), we
        // shouldn't schedule any work for it.
        if !self.is_running() {
            return Err(ErrorCode::NODEVICE);
        }

        let ret = self.tasks.map_or(Err(ErrorCode::FAIL), |tasks| {
            match tasks.enqueue(task) {
                true => {
                    // The task has been successfully enqueued.
                    Ok(())
                }
                false => {
                    // The task could not be enqueued as there is
                    // insufficient space in the ring buffer.
                    Err(ErrorCode::NOMEM)
                }
            }
        });

        if ret.is_err() {
            // On any error we were unable to enqueue the task. Record the
            // error, but importantly do _not_ increment kernel work.
            self.debug.increment_dropped_upcall_count();
        }

        ret
    }

    fn ready(&self) -> bool {
        self.tasks.map_or(false, |ring_buf| ring_buf.has_elements())
            || self.state.get() == State::Running
    }

    fn remove_pending_upcalls(&self, upcall_id: UpcallId) -> usize {
        self.tasks.map_or(0, |tasks| {
            let count_before = tasks.len();
            tasks.retain(|task| match task {
                // Remove only tasks that are function calls with an id equal
                // to `upcall_id`.
                Task::FunctionCall(function_call) => match function_call.source {
                    FunctionCallSource::Kernel => true,
                    FunctionCallSource::Driver(id) => id != upcall_id,
                },
                _ => true,
            });
            let count_after = tasks.len();
            if config::CONFIG.trace_syscalls {
                debug!(
                    "[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed",
                    self.processid(),
                    upcall_id.driver_num,
                    upcall_id.subscribe_num,
                    count_before - count_after,
                );
            }
            count_before - count_after
        })
    }

    fn is_running(&self) -> bool {
        match self.state.get() {
            State::Running | State::Yielded | State::YieldedFor(_) | State::Stopped(_) => true,
            _ => false,
        }
    }

    fn get_state(&self) -> State {
        self.state.get()
    }

    fn set_yielded_state(&self) {
        if self.state.get() == State::Running {
            self.state.set(State::Yielded);
        }
    }

    fn set_yielded_for_state(&self, upcall_id: UpcallId) {
        if self.state.get() == State::Running {
            self.state.set(State::YieldedFor(upcall_id));
        }
    }

    fn stop(&self) {
        match self.state.get() {
            State::Running => self.state.set(State::Stopped(StoppedState::Running)),
            State::Yielded => self.state.set(State::Stopped(StoppedState::Yielded)),
            State::YieldedFor(upcall_id) => self
                .state
                .set(State::Stopped(StoppedState::YieldedFor(upcall_id))),
            State::Stopped(_stopped_state) => {
                // Already stopped, nothing to do.
            }
            State::Faulted | State::Terminated => {
                // Stop has no meaning on an inactive process.
            }
        }
    }

    fn resume(&self) {
        if let State::Stopped(stopped_state) = self.state.get() {
            match stopped_state {
                StoppedState::Running => self.state.set(State::Running),
                StoppedState::Yielded => self.state.set(State::Yielded),
                StoppedState::YieldedFor(upcall_id) => self.state.set(State::YieldedFor(upcall_id)),
            }
        }
    }

    fn set_fault_state(&self) {
        // Use the per-process fault policy to determine what action the kernel
        // should take since the process faulted.
        let action = self.fault_policy.action(self);
        match action {
            FaultAction::Panic => {
                // The process faulted. Panic and print status.
                self.state.set(State::Faulted);
                panic!("Process {} had a fault", self.get_process_name());
            }
            FaultAction::Restart => {
                self.try_restart(None);
            }
            FaultAction::Stop => {
                // This looks a lot like restart, except we just leave the app
                // as it faulted and mark it as `Faulted`. By clearing
                // all of the app's todo work it will not be scheduled, and
                // clearing all of the grant regions will cause capsules to drop
                // this app as well.
                self.terminate(None);
                self.state.set(State::Faulted);
            }
        }
    }

    fn start(&self, _cap: &dyn crate::capabilities::ProcessStartCapability) {
        // `start()` can only be called on a terminated process.
        if self.get_state() != State::Terminated {
            return;
        }

        // Reset to start the process.
        if let Ok(()) = self.reset() {
            self.state.set(State::Yielded);
        }
    }

    fn try_restart(&self, completion_code: Option<u32>) {
        // `try_restart()` cannot be called if the process is terminated. Only
        // `start()` can start a terminated process.
        if self.get_state() == State::Terminated {
            return;
        }

        // Terminate the process, freeing its state and removing any
        // pending tasks from the scheduler's queue.
        self.terminate(completion_code);

        // If there is a kernel policy that controls restarts, it should be
        // implemented here. For now, always restart.
        if let Ok(()) = self.reset() {
            self.state.set(State::Yielded);
        }

        // Decide what to do with the result later. E.g., if we can't restart
        // we may want to reclaim the process's resources.
    }

    fn terminate(&self, completion_code: Option<u32>) {
        // A process can be terminated if it is running or in the `Faulted`
        // state. Otherwise, it cannot be terminated and this method returns
        // early.
        //
        // The kernel can terminate a process in the `Faulted` state to return
        // it to a state in which it can run again (e.g., reset it).
        if !self.is_running() && self.get_state() != State::Faulted {
            return;
        }

        // Remove all of the tasks queued for this process.
        self.tasks.map(|tasks| {
            tasks.empty();
        });

        // Clear any grant regions this app has setup with any capsules.
        unsafe {
            self.grant_ptrs_reset();
        }

        // Save the completion code.
        self.completion_code.set(completion_code);

        // Mark the app as stopped so the scheduler won't try to run it.
        self.state.set(State::Terminated);
    }

    fn get_restart_count(&self) -> usize {
        self.restart_count.get()
    }

    fn has_tasks(&self) -> bool {
        self.tasks.map_or(false, |tasks| tasks.has_elements())
    }

    fn dequeue_task(&self) -> Option<Task> {
        self.tasks.map_or(None, |tasks| tasks.dequeue())
    }

    fn remove_upcall(&self, upcall_id: UpcallId) -> Option<Task> {
        self.tasks.map_or(None, |tasks| {
            tasks.remove_first_matching(|task| match task {
                Task::FunctionCall(fc) => match fc.source {
                    FunctionCallSource::Driver(upid) => upid == upcall_id,
                    _ => false,
                },
                Task::ReturnValue(rv) => rv.upcall_id == upcall_id,
                Task::IPC(_) => false,
            })
        })
    }

    fn pending_tasks(&self) -> usize {
        self.tasks.map_or(0, |tasks| tasks.len())
    }

    fn get_command_permissions(&self, driver_num: usize, offset: usize) -> CommandPermissions {
        self.header.get_command_permissions(driver_num, offset)
    }

    fn get_storage_permissions(&self) -> StoragePermissions {
        self.storage_permissions
    }

    fn number_writeable_flash_regions(&self) -> usize {
        self.header.number_writeable_flash_regions()
    }

    fn get_writeable_flash_region(&self, region_index: usize) -> (usize, usize) {
        self.header.get_writeable_flash_region(region_index)
    }

    fn update_stack_start_pointer(&self, stack_pointer: *const u8) {
        if stack_pointer >= self.mem_start() && stack_pointer < self.mem_end() {
            self.debug.set_app_stack_start_pointer(stack_pointer);
            // We also reset the minimum stack pointer because whatever
            // value we had could be entirely wrong by now.
            self.debug.set_app_stack_min_pointer(stack_pointer);
        }
    }

    fn update_heap_start_pointer(&self, heap_pointer: *const u8) {
        if heap_pointer >= self.mem_start() && heap_pointer < self.mem_end() {
            self.debug.set_app_heap_start_pointer(heap_pointer);
        }
    }

    fn setup_mpu(&self) {
        self.mpu_config.map(|config| {
            // # Safety
            //
            // `configure_mpu` is unsafe, as invoking it with an incorrect
            // configuration can allow an untrusted application to access
            // kernel-private memory.
            //
            // This call is safe given we trust that the implementation of
            // `ProcessStandard` correctly provisions a set of MPU regions that
            // does not grant access to any kernel-private memory, and
            // `ProcessStandard` does not provide safe, publicly accessible
            // APIs to add other arbitrary MPU regions to this configuration.
            unsafe {
                self.chip.mpu().configure_mpu(config);
            }
        });
    }

    fn add_mpu_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
    ) -> Option<mpu::Region> {
        self.mpu_config.and_then(|config| {
            let new_region = self.chip.mpu().allocate_region(
                unallocated_memory_start,
                unallocated_memory_size,
                min_region_size,
                mpu::Permissions::ReadWriteOnly,
                config,
            )?;

            for region in self.mpu_regions.iter() {
                if region.get().is_none() {
                    region.set(Some(new_region));
                    return Some(new_region);
                }
            }

            // Not enough room in the Process struct to store the MPU region.
            None
        })
    }

    fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode> {
        self.mpu_config.map_or(Err(ErrorCode::INVAL), |config| {
            // Find the existing mpu region that we are removing; it needs to match exactly.
            if let Some(internal_region) = self.mpu_regions.iter().find(|r| r.get() == Some(region))
            {
                self.chip
                    .mpu()
                    .remove_memory_region(region, config)
                    .or(Err(ErrorCode::FAIL))?;

                // Remove this region from the tracking cache of mpu_regions.
                internal_region.set(None);
                Ok(())
            } else {
                Err(ErrorCode::INVAL)
            }
        })
    }

    fn sbrk(&self, increment: isize) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        let new_break = self.app_break.get().wrapping_offset(increment);
        self.brk(new_break)
    }

    fn brk(&self, new_break: *const u8) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        self.mpu_config.map_or(Err(Error::KernelError), |config| {
            if new_break < self.allow_high_water_mark.get() || new_break >= self.mem_end() {
                Err(Error::AddressOutOfBounds)
            } else if new_break > self.kernel_memory_break.get() {
                Err(Error::OutOfMemory)
            } else if let Err(()) = self.chip.mpu().update_app_memory_region(
                new_break,
                self.kernel_memory_break.get(),
                mpu::Permissions::ReadWriteOnly,
                config,
            ) {
                Err(Error::OutOfMemory)
            } else {
                let old_break = self.app_break.get();
                self.app_break.set(new_break);

                // # Safety
                //
                // `configure_mpu` is unsafe, as invoking it with an incorrect
                // configuration can allow an untrusted application to access
                // kernel-private memory.
                //
                // This call is safe given we trust that the implementation of
                // `ProcessStandard` correctly provisions a set of MPU regions
                // that does not grant access to any kernel-private memory, and
                // `ProcessStandard` does not provide safe, publicly
                // accessible APIs to add other arbitrary MPU regions to this
                // configuration.
                unsafe {
                    self.chip.mpu().configure_mpu(config);
                }

                let base = self.mem_start() as usize;
                let break_result = unsafe {
                    CapabilityPtr::new_with_authority(
                        old_break as *const (),
                        base,
                        (new_break as usize) - base,
                        CapabilityPtrPermissions::ReadWrite,
                    )
                };

                Ok(break_result)
            }
        })
    }
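
    // An illustrative note (a sketch of the expected call path, not an API
    // guarantee of this file): a userspace `memop` brk/sbrk call is routed by
    // the core kernel to `brk()`/`sbrk()` above, and on success the returned
    // `CapabilityPtr` points at the old break while carrying authority over
    // the app-accessible region ending at the new break.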

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readwrite_process_buffer(
        &self,
        buf_start_addr: *mut u8,
        size: usize,
    ) -> Result<ReadWriteProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process.
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // so as to revoke kernel access to a memory region without granting
        // access to another one.
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of length
            // 0 will never allow dereferencing any memory in a safe manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadWriteProcessBuffer` will handle any safety issues.
            // Therefore, we can encapsulate the unsafe.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size) {
            // TODO: Check for buffer aliasing here

            // Valid buffer; we need to adjust the app's watermark.
            // Note: `in_app_owned_memory` ensures this offset does not wrap.
            let buf_end_addr = buf_start_addr.wrapping_add(size);
            let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
            self.allow_high_water_mark.set(new_water_mark);

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified using
            // `in_app_owned_memory`) and respect alignment and other
            // constraints of the Rust references created by
            // `ReadWriteProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadWriteProcessBuffer` will
            // be the only reference to this memory.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readonly_process_buffer(
        &self,
        buf_start_addr: *const u8,
        size: usize,
    ) -> Result<ReadOnlyProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process.
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // so as to revoke kernel access to a memory region without granting
        // access to another one.
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of length
            // 0 will never allow dereferencing any memory in a safe manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadOnlyProcessBuffer` will handle any safety issues. Therefore,
            // we can encapsulate the unsafe.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size)
            || self.in_app_flash_memory(buf_start_addr, size)
        {
            // TODO: Check for buffer aliasing here

            if self.in_app_owned_memory(buf_start_addr, size) {
                // Valid buffer, and since this is in read-write memory (i.e.
                // not flash), we need to adjust the process's watermark. Note:
                // `in_app_owned_memory()` ensures this offset does not wrap.
                let buf_end_addr = buf_start_addr.wrapping_add(size);
                let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
                self.allow_high_water_mark.set(new_water_mark);
            }

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is as
            // dangerous (as it is likely to be dereferenced down the line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified using
            // `in_app_owned_memory` or `in_app_flash_memory`) and respect
            // alignment and other constraints of the Rust references created by
            // `ReadOnlyProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadOnlyProcessBuffer` will
            // be the only reference to this memory.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }

    unsafe fn set_byte(&self, addr: *mut u8, value: u8) -> bool {
        if self.in_app_owned_memory(addr, 1) {
            // We verify that this will only write process-accessible memory,
            // but this can still be undefined behavior if something else holds
            // a reference to this memory.
            *addr = value;
            true
        } else {
            false
        }
    }

    fn grant_is_allocated(&self, grant_num: usize) -> Option<bool> {
        // Do not access the grant region of an inactive process.
        if !self.is_running() {
            return None;
        }

        // Check the grant pointer table: a grant is allocated if and only if
        // its grant pointer is non-null.
        self.grant_pointers.map_or(None, |grant_pointers| {
            // Implement `grant_pointers[grant_num]` without a chance of a
            // panic.
            grant_pointers
                .get(grant_num)
                .map(|grant_entry| !grant_entry.grant_ptr.is_null())
        })
    }

    fn allocate_grant(
        &self,
        grant_num: usize,
        driver_num: usize,
        size: usize,
        align: usize,
    ) -> Result<(), ()> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(());
        }

        // Verify the grant_num is valid.
        if grant_num >= self.kernel.get_grant_count_and_finalize() {
            return Err(());
        }

        // Verify that the grant is not already allocated. If the pointer is not
        // null then the grant is already allocated.
        if let Some(is_allocated) = self.grant_is_allocated(grant_num) {
            if is_allocated {
                return Err(());
            }
        }

        // Verify that there is not already a grant allocated with the same
        // `driver_num`.
        let exists = self.grant_pointers.map_or(false, |grant_pointers| {
            // Check our list of grant pointers to see if the driver number is
            // already in use.
            grant_pointers.iter().any(|grant_entry| {
                // Check if the grant is both allocated (its grant pointer is
                // non null) and the driver number matches.
                (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
            })
        });
        // If we find a match, then the `driver_num` must already be used and
        // the grant allocation fails.
        if exists {
            return Err(());
        }

        // Use the shared grant allocator function to actually allocate memory.
        // Returns `None` if the allocation cannot be created.
        if let Some(grant_ptr) = self.allocate_in_grant_region_internal(size, align) {
            // Update the grant pointer to the address of the new allocation.
            self.grant_pointers.map_or(Err(()), |grant_pointers| {
                // Implement `grant_pointers[grant_num] = grant_ptr` without a
                // chance of a panic.
                grant_pointers
                    .get_mut(grant_num)
                    .map_or(Err(()), |grant_entry| {
                        // Actually set the driver num and grant pointer.
                        grant_entry.driver_num = driver_num;
                        grant_entry.grant_ptr = grant_ptr.as_ptr();

                        // If all of this worked, return `Ok(())`.
                        Ok(())
                    })
            })
        } else {
            // Could not allocate the memory for the grant region.
            Err(())
        }
    }

    fn allocate_custom_grant(
        &self,
        size: usize,
        align: usize,
    ) -> Result<(ProcessCustomGrantIdentifier, NonNull<u8>), ()> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(());
        }

        // Use the shared grant allocator function to actually allocate memory.
        // Returns `None` if the allocation cannot be created.
        if let Some(ptr) = self.allocate_in_grant_region_internal(size, align) {
            // Create the identifier that the caller will use to get access to
            // this custom grant in the future.
            let identifier = self.create_custom_grant_identifier(ptr);

            Ok((identifier, ptr))
        } else {
            // Could not allocate memory for the custom grant.
            Err(())
        }
    }

    fn enter_grant(&self, grant_num: usize) -> Result<NonNull<u8>, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Retrieve the grant pointer from the `grant_pointers` slice. We use
        // `[slice].get()` so that if the grant number is invalid this will
        // return `Err` and not panic.
        self.grant_pointers
            .map_or(Err(Error::KernelError), |grant_pointers| {
                // Implement `grant_pointers[grant_num]` without a chance of a
                // panic.
                match grant_pointers.get_mut(grant_num) {
                    Some(grant_entry) => {
                        // Get a copy of the actual grant pointer.
                        let grant_ptr = grant_entry.grant_ptr;

                        // Check if the grant pointer is marked that the grant
                        // has already been entered. If so, return an error.
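                        //
                        // Note (illustrative): the low bit can serve as an
                        // "entered" flag because grant allocations are
                        // aligned, so a real grant pointer always has a zero
                        // low bit. E.g., a grant at address 0x2000_4000 would
                        // be stored as 0x2000_4001 while entered (addresses
                        // here are made up for illustration).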
                        if (grant_ptr as usize) & 0x1 == 0x1 {
                            // Lowest bit is one, meaning this grant has been
                            // entered.
                            Err(Error::AlreadyInUse)
                        } else {
                            // Now, to mark that the grant has been entered, we
                            // set the lowest bit to one and save this as the
                            // grant pointer.
                            grant_entry.grant_ptr = (grant_ptr as usize | 0x1) as *mut u8;

                            // And we return the grant pointer to the entered
                            // grant.
                            Ok(unsafe { NonNull::new_unchecked(grant_ptr) })
                        }
                    }
                    None => Err(Error::AddressOutOfBounds),
                }
            })
    }

    fn enter_custom_grant(
        &self,
        identifier: ProcessCustomGrantIdentifier,
    ) -> Result<*mut u8, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Get the address of the custom grant based on the identifier.
        let custom_grant_address = self.get_custom_grant_address(identifier);

        // We never deallocate custom grants and only we can change the
        // `identifier` so we know this is a valid address for the custom grant.
        Ok(custom_grant_address as *mut u8)
    }

    unsafe fn leave_grant(&self, grant_num: usize) {
        // Do not modify an inactive process.
        if !self.is_running() {
            return;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Implement `grant_pointers[grant_num]` without a chance of a
            // panic.
            if let Some(grant_entry) = grant_pointers.get_mut(grant_num) {
                // Get a copy of the actual grant pointer.
                let grant_ptr = grant_entry.grant_ptr;

                // Now, to mark that the grant has been released, we set the
                // lowest bit back to zero and save this as the grant
                // pointer.
                grant_entry.grant_ptr = (grant_ptr as usize & !0x1) as *mut u8;
            }
        });
    }

    fn grant_allocated_count(&self) -> Option<usize> {
        // Do not access the grant region of an inactive process.
        if !self.is_running() {
            return None;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Filter our list of grant pointers into just the non-null ones,
            // and count those. A grant is allocated if its grant pointer is
            // non-null.
            grant_pointers
                .iter()
                .filter(|grant_entry| !grant_entry.grant_ptr.is_null())
                .count()
        })
    }

    fn lookup_grant_from_driver_num(&self, driver_num: usize) -> Result<usize, Error> {
        self.grant_pointers
            .map_or(Err(Error::KernelError), |grant_pointers| {
                // Search our list of grant pointers for the first allocated
                // grant (non-null grant pointer) whose driver number matches.
                match grant_pointers.iter().position(|grant_entry| {
                    // Only consider allocated grants.
                    (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
                }) {
                    Some(idx) => Ok(idx),
                    None => Err(Error::OutOfMemory),
                }
            })
    }
1268
1269    fn is_valid_upcall_function_pointer(&self, upcall_fn: *const ()) -> bool {
1270        let ptr = upcall_fn as *const u8;
1271        let size = mem::size_of::<*const u8>();
1272
1273        // It is okay if this function is in memory or flash.
1274        self.in_app_flash_memory(ptr, size) || self.in_app_owned_memory(ptr, size)
1275    }
1276
1277    fn get_process_name(&self) -> &'static str {
1278        self.header.get_package_name().unwrap_or("")
1279    }
1280
1281    fn get_completion_code(&self) -> Option<Option<u32>> {
1282        self.completion_code.get()
1283    }
1284
1285    fn set_syscall_return_value(&self, return_value: SyscallReturn) {
1286        match self.stored_state.map(|stored_state| unsafe {
1287            // Actually set the return value for a particular process.
1288            //
1289            // The UKB implementation uses the bounds of process-accessible
1290            // memory to verify that any memory changes are valid. Here, the
1291            // unsafe promise we are making is that the bounds passed to the UKB
1292            // are correct.
1293            self.chip
1294                .userspace_kernel_boundary()
1295                .set_syscall_return_value(
1296                    self.mem_start(),
1297                    self.app_break.get(),
1298                    stored_state,
1299                    return_value,
1300                )
1301        }) {
1302            Some(Ok(())) => {
1303                // If we get an `Ok` we are all set.
1304
1305                // The process is either already in the running state (having
1306                // just called a nonblocking syscall like command) or needs to
1307                // be moved to the running state having called Yield-WaitFor and
1308                // now needing to be resumed. Either way we can set the state to
1309                // running.
1310                self.state.set(State::Running);
1311            }
1312
1313            Some(Err(())) => {
1314                // If we get an `Err`, then the UKB implementation could not set
1315                // the return value, likely because the process's stack is no
1316                // longer accessible to it. All we can do is fault.
1317                self.set_fault_state();
1318            }
1319
1320            None => {
1321                // We should never be here since `stored_state` should always be
1322                // occupied.
1323                self.set_fault_state();
1324            }
1325        }
1326    }
1327
1328    fn set_process_function(&self, callback: FunctionCall) {
1329        // See if we can actually enqueue this function for this process.
1330        // Architecture-specific code handles actually doing this since the
1331        // exact method is both architecture- and implementation-specific.
1332        //
1333        // This can fail, for example if the process does not have enough memory
1334        // remaining.
1335        match self.stored_state.map(|stored_state| {
1336            // Let the UKB implementation handle setting the process's PC so
1337            // that the process executes the upcall function. We encapsulate
1338            // unsafe here because we are guaranteeing that the memory bounds
1339            // passed to `set_process_function` are correct.
1340            unsafe {
1341                self.chip.userspace_kernel_boundary().set_process_function(
1342                    self.mem_start(),
1343                    self.app_break.get(),
1344                    stored_state,
1345                    callback,
1346                )
1347            }
1348        }) {
1349            Some(Ok(())) => {
1350                // If we got an `Ok` we are all set and should mark that this
1351                // process is ready to be scheduled.
1352
1353                // Move this process to the "running" state so the scheduler
1354                // will schedule it.
1355                self.state.set(State::Running);
1356            }
1357
1358            Some(Err(())) => {
1359                // If we got an `Err`, then there was likely not enough room on
1360                // the stack to allow the process to execute this function given
1361                // the details of the particular architecture this is running
1362                // on. This process has essentially faulted, so we mark it as
1363                // such.
1364                self.set_fault_state();
1365            }
1366
1367            None => {
1368                // We should never be here since `stored_state` should always be
1369                // occupied.
1370                self.set_fault_state();
1371            }
1372        }
1373    }
1374
1375    fn switch_to(&self) -> Option<syscall::ContextSwitchReason> {
1376        // Cannot switch to an invalid process
1377        if !self.is_running() {
1378            return None;
1379        }
1380
1381        let (switch_reason, stack_pointer) =
1382            self.stored_state.map_or((None, None), |stored_state| {
1383                // Switch to the process. We guarantee that the memory pointers
1384                // we pass are valid, ensuring this context switch is safe.
1385                // Therefore we encapsulate the `unsafe`.
1386                unsafe {
1387                    let (switch_reason, optional_stack_pointer) = self
1388                        .chip
1389                        .userspace_kernel_boundary()
1390                        .switch_to_process(self.mem_start(), self.app_break.get(), stored_state);
1391                    (Some(switch_reason), optional_stack_pointer)
1392                }
1393            });
1394
1395        // If the UKB implementation passed us a stack pointer, update our
1396        // debugging state. This is completely optional.
1397        if let Some(sp) = stack_pointer {
1398            self.debug.set_new_app_stack_min_pointer(sp);
1399        }
1400
1401        switch_reason
1402    }
1403
1404    fn debug_syscall_count(&self) -> usize {
1405        self.debug.get_syscall_count()
1406    }
1407
1408    fn debug_dropped_upcall_count(&self) -> usize {
1409        self.debug.get_dropped_upcall_count()
1410    }
1411
1412    fn debug_timeslice_expiration_count(&self) -> usize {
1413        self.debug.get_timeslice_expiration_count()
1414    }
1415
1416    fn debug_timeslice_expired(&self) {
1417        self.debug.increment_timeslice_expiration_count();
1418    }
1419
1420    fn debug_syscall_called(&self, last_syscall: Syscall) {
1421        self.debug.increment_syscall_count();
1422        self.debug.set_last_syscall(last_syscall);
1423    }
1424
1425    fn debug_syscall_last(&self) -> Option<Syscall> {
1426        self.debug.get_last_syscall()
1427    }
1428
1429    fn get_addresses(&self) -> ProcessAddresses {
1430        ProcessAddresses {
1431            flash_start: self.flash_start() as usize,
1432            flash_non_protected_start: self.flash_non_protected_start() as usize,
1433            flash_integrity_end: ((self.flash.as_ptr() as usize)
1434                + (self.header.get_binary_end() as usize))
1435                as *const u8,
1436            flash_end: self.flash_end() as usize,
1437            sram_start: self.mem_start() as usize,
1438            sram_app_brk: self.app_memory_break() as usize,
1439            sram_grant_start: self.kernel_memory_break() as usize,
1440            sram_end: self.mem_end() as usize,
1441            sram_heap_start: self.debug.get_app_heap_start_pointer().map(|p| p as usize),
1442            sram_stack_top: self.debug.get_app_stack_start_pointer().map(|p| p as usize),
1443            sram_stack_bottom: self.debug.get_app_stack_min_pointer().map(|p| p as usize),
1444        }
1445    }
1446
1447    fn get_sizes(&self) -> ProcessSizes {
1448        ProcessSizes {
1449            grant_pointers: mem::size_of::<GrantPointerEntry>()
1450                * self.kernel.get_grant_count_and_finalize(),
1451            upcall_list: Self::CALLBACKS_OFFSET,
1452            process_control_block: Self::PROCESS_STRUCT_OFFSET,
1453        }
1454    }
1455
1456    fn print_full_process(&self, writer: &mut dyn Write) {
1457        if !config::CONFIG.debug_panics {
1458            return;
1459        }
1460
1461        self.stored_state.map(|stored_state| {
1462            // We guarantee the memory bounds pointers provided to the UKB are
1463            // correct.
1464            unsafe {
1465                self.chip.userspace_kernel_boundary().print_context(
1466                    self.mem_start(),
1467                    self.app_break.get(),
1468                    stored_state,
1469                    writer,
1470                );
1471            }
1472        });
1473
1474        // Display grant information.
1475        let number_grants = self.kernel.get_grant_count_and_finalize();
1476        let _ = writer.write_fmt(format_args!(
1477            "\
1478            \r\n Total number of grant regions defined: {}\r\n",
1479            number_grants
1480        ));
1481        let rows = number_grants.div_ceil(3);
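        // The table is filled column-major: row `i` prints grants `i`,
        // `i + rows`, and `i + 2 * rows`. For example, with 7 grants
        // (`rows` = 3) the rows print (0, 3, 6), (1, 4), and (2, 5).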
1482
1483        // Access our array of grant pointers.
1484        self.grant_pointers.map(|grant_pointers| {
1485            // Iterate each grant and show its address.
1486            for i in 0..rows {
1487                for j in 0..3 {
1488                    let index = i + (rows * j);
1489                    if index >= number_grants {
1490                        break;
1491                    }
1492
1493                    // Implement `grant_pointers[index]` without a chance of
1494                    // a panic.
1495                    grant_pointers.get(index).map(|grant_entry| {
1496                        if grant_entry.grant_ptr.is_null() {
1497                            let _ =
1498                                writer.write_fmt(format_args!("  Grant {:>2} : --        ", index));
1499                        } else {
1500                            let _ = writer.write_fmt(format_args!(
1501                                "  Grant {:>2} {:#x}: {:p}",
1502                                index, grant_entry.driver_num, grant_entry.grant_ptr
1503                            ));
1504                        }
1505                    });
1506                }
1507                let _ = writer.write_fmt(format_args!("\r\n"));
1508            }
1509        });
1510
1511        // Display the current state of the MPU for this process.
1512        self.mpu_config.map(|config| {
1513            let _ = writer.write_fmt(format_args!("{}", config));
1514        });
1515
1516        // Print a helpful message on how to re-compile a process to view the
1517        // listing file. If a process is PIC, then we also need to print the
1518        // actual addresses the process executed at so that the .lst file can be
1519        // generated for those addresses. If the process was already compiled
1520        // for a fixed address, then just generating a .lst file is fine.
1521
1522        if self.debug.get_fixed_address_flash().is_some() {
1523            // Fixed addresses, can just run `make lst`.
1524            let _ = writer.write_fmt(format_args!(
1525                "\
1526                    \r\nTo debug libtock-c apps, run `make lst` in the app's\
1527                    \r\nfolder and open the arch.{:#x}.{:#x}.lst file.\r\n\r\n",
1528                self.debug.get_fixed_address_flash().unwrap_or(0),
1529                self.debug.get_fixed_address_ram().unwrap_or(0)
1530            ));
1531        } else {
1532            // PIC, need to specify the addresses.
1533            let sram_start = self.mem_start() as usize;
1534            let flash_start = self.flash.as_ptr() as usize;
1535            let flash_init_fn = flash_start + self.header.get_init_function_offset() as usize;
1536
1537            let _ = writer.write_fmt(format_args!(
1538                "\
1539                    \r\nTo debug libtock-c apps, run\
1540                    \r\n`make debug RAM_START={:#x} FLASH_INIT={:#x}`\
1541                    \r\nin the app's folder and open the .lst file.\r\n\r\n",
1542                sram_start, flash_init_fn
1543            ));
1544        }
1545    }
1546
1547    fn get_stored_state(&self, out: &mut [u8]) -> Result<usize, ErrorCode> {
1548        self.stored_state
1549            .map(|stored_state| {
1550                self.chip
1551                    .userspace_kernel_boundary()
1552                    .store_context(stored_state, out)
1553            })
1554            .unwrap_or(Err(ErrorCode::FAIL))
1555    }
1556}
1557
1558impl<C: 'static + Chip, D: 'static + ProcessStandardDebug> ProcessStandard<'_, C, D> {
1559    // Memory offset for the upcall ring buffer (10 elements).
1560    const CALLBACK_LEN: usize = 10;
1561    const CALLBACKS_OFFSET: usize = mem::size_of::<Task>() * Self::CALLBACK_LEN;
1562
1563    // Memory offset to make room for this process's metadata.
1564    const PROCESS_STRUCT_OFFSET: usize = mem::size_of::<ProcessStandard<C, D>>();
1565
1566    /// Create a `ProcessStandard` object based on the found `ProcessBinary`.
1567    pub(crate) unsafe fn create<'a>(
1568        kernel: &'static Kernel,
1569        chip: &'static C,
1570        pb: ProcessBinary,
1571        remaining_memory: &'a mut [u8],
1572        fault_policy: &'static dyn ProcessFaultPolicy,
1573        storage_permissions_policy: &'static dyn ProcessStandardStoragePermissionsPolicy<C, D>,
1574        app_id: ShortId,
1575        index: usize,
1576    ) -> Result<(Option<&'static dyn Process>, &'a mut [u8]), (ProcessLoadError, &'a mut [u8])>
1577    {
1578        let process_name = pb.header.get_package_name();
1579        let process_ram_requested_size = pb.header.get_minimum_app_ram_size() as usize;
1580
1581        // Initialize MPU region configuration.
1582        let mut mpu_config = match chip.mpu().new_config() {
1583            Some(mpu_config) => mpu_config,
1584            None => return Err((ProcessLoadError::MpuConfigurationError, remaining_memory)),
1585        };
1586
1587        // Allocate MPU region for flash.
1588        if chip
1589            .mpu()
1590            .allocate_region(
1591                pb.flash.as_ptr(),
1592                pb.flash.len(),
1593                pb.flash.len(),
1594                mpu::Permissions::ReadExecuteOnly,
1595                &mut mpu_config,
1596            )
1597            .is_none()
1598        {
1599            if config::CONFIG.debug_load_processes {
1600                debug!(
1601                        "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash",
1602                        pb.flash.as_ptr() as usize,
1603                        pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1604                        process_name
1605                    );
1606            }
1607            return Err((ProcessLoadError::MpuInvalidFlashLength, remaining_memory));
1608        }
1609
1610        // Determine how much space we need in the application's memory space
1611        // just for kernel and grant state, and make sure we allocate at least
1612        // that much.
1613
1614        // Make room for grant pointers.
1615        let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
1616        let grant_ptrs_num = kernel.get_grant_count_and_finalize();
1617        let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
1618
1619        // Initial size of the kernel-owned part of process memory can be
1620        // calculated directly based on the initial size of all kernel-owned
1621        // data structures.
1622        //
1623        // We require our kernel memory break (located at the end of the
1624        // MPU-returned allocated memory region) to be word-aligned. However, we
1625        // don't have any explicit alignment constraints from the MPU. To ensure
1626        // that the below kernel-owned data structures still fit into the
1627        // kernel-owned memory even with padding for alignment, add an extra
1628        // `sizeof(usize)` bytes.
1629        let initial_kernel_memory_size = grant_ptrs_offset
1630            + Self::CALLBACKS_OFFSET
1631            + Self::PROCESS_STRUCT_OFFSET
1632            + core::mem::size_of::<usize>();
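        // (Illustrative sizes, assuming a 32-bit chip with 4 grant drivers:
        // 4 * 8 bytes of grant pointers, plus `CALLBACKS_OFFSET` and
        // `PROCESS_STRUCT_OFFSET`, plus 4 bytes of alignment slack.)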
1633
1634        // By default we start with the initial size of process-accessible
1635        // memory set to 0. This maximizes the flexibility that processes have
1636        // to allocate their memory as they see fit. If a process needs more
1637        // accessible memory it must use the `brk` memop syscalls to request
1638        // more memory.
1639        //
1640        // We must take into account any process-accessible memory required by
1641        // the context switching implementation and allocate at least that much
1642        // memory so that we can successfully switch to the process. This is
1643        // architecture and implementation specific, so we query that now.
1644        let min_process_memory_size = chip
1645            .userspace_kernel_boundary()
1646            .initial_process_app_brk_size();
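        // (For example, an architecture whose context-switch code pushes an
        // initial stack frame into process memory must report at least that
        // frame's size here.)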
1647
1648        // We have to ensure that we at least ask the MPU for
1649        // `min_process_memory_size` so that we can be sure that `app_brk` is
1650        // not set inside the kernel-owned memory region. Now, in practice,
1651        // processes should not request 0 (or very few) bytes of memory in their
1652        // TBF header (i.e. `process_ram_requested_size` will almost always be
1653        // much larger than `min_process_memory_size`), as they are unlikely to
1654        // work with essentially no available memory. But, we still must protect
1655        // for that case.
1656        let min_process_ram_size = cmp::max(process_ram_requested_size, min_process_memory_size);
1657
1658        // Minimum memory size for the process.
1659        let min_total_memory_size = min_process_ram_size + initial_kernel_memory_size;
1660
1661        // Check if this process requires a fixed memory start address. If so,
1662        // try to adjust the memory region to work for this process.
1663        //
1664        // Right now, we only support skipping some RAM and leaving a chunk
1665        // unused so that the memory region starts where the process needs it
1666        // to.
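        // For example, if `remaining_memory` starts at 0x20004000 and the
        // process's TBF header requests a fixed RAM start of 0x20008000, the
        // first 0x4000 bytes are skipped and left unused.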
1667        let remaining_memory = if let Some(fixed_memory_start) = pb.header.get_fixed_address_ram() {
1668            // The process does have a fixed address.
1669            if fixed_memory_start == remaining_memory.as_ptr() as u32 {
1670                // Address already matches.
1671                remaining_memory
1672            } else if fixed_memory_start > remaining_memory.as_ptr() as u32 {
1673                // Process wants a memory address farther in memory. Try to
1674                // advance the memory region to make the address match.
1675                let diff = (fixed_memory_start - remaining_memory.as_ptr() as u32) as usize;
1676                if diff > remaining_memory.len() {
1677                    // We ran out of memory.
1678                    let actual_address =
1679                        remaining_memory.as_ptr() as u32 + remaining_memory.len() as u32 - 1;
1680                    let expected_address = fixed_memory_start;
1681                    return Err((
1682                        ProcessLoadError::MemoryAddressMismatch {
1683                            actual_address,
1684                            expected_address,
1685                        },
1686                        remaining_memory,
1687                    ));
1688                } else {
1689                    // Change the memory range to start where the process
1690                    // requested it. The bounds check above guarantees this
1691                    // slicing is in range. Doing this more cleanly runs into
1692                    // borrow rules: `get_mut` takes a mutable borrow. -pal
1693                    &mut remaining_memory[diff..]
1694                }
1695            } else {
1696                // Address is earlier in memory, nothing we can do.
1697                let actual_address = remaining_memory.as_ptr() as u32;
1698                let expected_address = fixed_memory_start;
1699                return Err((
1700                    ProcessLoadError::MemoryAddressMismatch {
1701                        actual_address,
1702                        expected_address,
1703                    },
1704                    remaining_memory,
1705                ));
1706            }
1707        } else {
1708            remaining_memory
1709        };
1710
1711        // Determine where process memory will go and allocate an MPU region.
1712        //
1713        // `[allocation_start, allocation_start + allocation_size)` will cover both
1714        //
1715        // - the app-owned `min_process_memory_size`-long part of memory (at
1716        //   some offset within `remaining_memory`), as well as
1717        //
1718        // - the kernel-owned allocation growing downward starting at the end
1719        //   of this allocation, `initial_kernel_memory_size` bytes long.
1720        //
1721        let (allocation_start, allocation_size) = match chip.mpu().allocate_app_memory_region(
1722            remaining_memory.as_ptr(),
1723            remaining_memory.len(),
1724            min_total_memory_size,
1725            min_process_memory_size,
1726            initial_kernel_memory_size,
1727            mpu::Permissions::ReadWriteOnly,
1728            &mut mpu_config,
1729        ) {
1730            Some((memory_start, memory_size)) => (memory_start, memory_size),
1731            None => {
1732                // Failed to load process. Insufficient memory.
1733                if config::CONFIG.debug_load_processes {
1734                    debug!(
1735                            "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate memory region of size >= {:#X}",
1736                            pb.flash.as_ptr() as usize,
1737                            pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1738                            process_name,
1739                            min_total_memory_size
1740                        );
1741                }
1742                return Err((ProcessLoadError::NotEnoughMemory, remaining_memory));
1743            }
1744        };
1745
1746        // Determine the offset of the app-owned part of the above memory
1747        // allocation. An MPU may not place it at the very start of
1748        // `remaining_memory` due to internal alignment constraints. This
1749        // subtraction can only underflow if the MPU implementation is
1750        // incorrect; a compliant implementation must return a memory
1751        // allocation within the `remaining_memory` slice.
1752        let app_memory_start_offset =
1753            allocation_start as usize - remaining_memory.as_ptr() as usize;
1754
1755        // Check if the memory region is valid for the process. If a process
1756        // included a fixed address for the start of RAM in its TBF header (this
1757        // field is optional, processes that are position independent do not
1758        // need a fixed address) then we check that we used the same address
1759        // when we allocated it in RAM.
1760        if let Some(fixed_memory_start) = pb.header.get_fixed_address_ram() {
1761            let actual_address = remaining_memory.as_ptr() as u32 + app_memory_start_offset as u32;
1762            let expected_address = fixed_memory_start;
1763            if actual_address != expected_address {
1764                return Err((
1765                    ProcessLoadError::MemoryAddressMismatch {
1766                        actual_address,
1767                        expected_address,
1768                    },
1769                    remaining_memory,
1770                ));
1771            }
1772        }
1773
1774        // With our MPU allocation, we can begin to divide up the
1775        // `remaining_memory` slice into individual regions for the process and
1776        // kernel, as follows:
1777        //
1778        //
1779        //  +-----------------------------------------------------------------
1780        //  | remaining_memory
1781        //  +----------------------------------------------------+------------
1782        //  v                                                    v
1783        //  +----------------------------------------------------+
1784        //  | allocated_padded_memory                            |
1785        //  +--+-------------------------------------------------+
1786        //     v                                                 v
1787        //     +-------------------------------------------------+
1788        //     | allocated_memory                                |
1789        //     +-------------------------------------------------+
1790        //     v                                                 v
1791        //     +-----------------------+-------------------------+
1792        //     | app_accessible_memory | allocated_kernel_memory |
1793        //     +-----------------------+-------------------+-----+
1794        //                                                 v
1795        //                               kernel memory break
1796        //                                                  \---+/
1797        //                                                      v
1798        //                                        optional padding
1799        //
1800        //
1801        // First split the `remaining_memory` into two slices:
1802        //
1803        // - `allocated_padded_memory`: the allocated memory region, containing
1804        //
1805        //   1. optional padding at the start of the memory region of
1806        //      `app_memory_start_offset` bytes,
1807        //
1808        //   2. the app accessible memory region of `min_process_memory_size`,
1809        //
1810        //   3. optional unallocated memory, and
1811        //
1812        //   4. kernel-reserved memory, growing downward from the end of the
1813        //      allocation.
1814        //
1815        // - `unused_memory`: the rest of the `remaining_memory`, not assigned
1816        //   to this app.
1817        //
1818        let (allocated_padded_memory, unused_memory) =
1819            remaining_memory.split_at_mut(app_memory_start_offset + allocation_size);
1820
1821        // Now, slice off the (optional) padding at the start:
1822        let (_padding, allocated_memory) =
1823            allocated_padded_memory.split_at_mut(app_memory_start_offset);
1824
1825        // We continue to sub-slice the `allocated_memory` into
1826        // process-accessible and kernel-owned memory. Prior to that, store the
1827        // start and length of the overall allocation:
1828        let allocated_memory_start = allocated_memory.as_ptr();
1829        let allocated_memory_len = allocated_memory.len();
1830
1831        // Slice off the process-accessible memory:
1832        let (app_accessible_memory, allocated_kernel_memory) =
1833            allocated_memory.split_at_mut(min_process_memory_size);
1834
1835        // Set the initial process-accessible memory:
1836        let initial_app_brk = app_accessible_memory
1837            .as_ptr()
1838            .add(app_accessible_memory.len());
1839
1840        // Set the initial allow high water mark to the start of process memory
1841        // since no `allow` calls have been made yet.
1842        let initial_allow_high_water_mark = app_accessible_memory.as_ptr();
1843
1844        // Set up initial grant region.
1845        //
1846        // `kernel_memory_break` is set to the end of kernel-accessible memory
1847        // and grows downward.
1848        //
1849        // We require the `kernel_memory_break` to be aligned to a
1850        // word-boundary, as we rely on this during offset calculations for
1851        // kernel-accessed structs (e.g. the grant pointer table) below. As it
1852        // moves downward in the address space, we can't use the `align_offset`
1853        // convenience functions.
1854        //
1855        // Calling `wrapping_sub` is safe here, as we've factored in an optional
1856        // padding of at most `sizeof(usize)` bytes in the calculation of
1857        // `initial_kernel_memory_size` above.
1858        let mut kernel_memory_break = allocated_kernel_memory
1859            .as_ptr()
1860            .add(allocated_kernel_memory.len());
1861
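        // E.g. on a 32-bit platform, a break address of 0x20001003 has
        // 0x20001003 % 4 == 3, so it is moved down to the word-aligned
        // 0x20001000.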
1862        kernel_memory_break = kernel_memory_break
1863            .wrapping_sub(kernel_memory_break as usize % core::mem::size_of::<usize>());
1864
1865        // Now that we know we have the space we can set up the grant pointers.
1866        kernel_memory_break = kernel_memory_break.offset(-(grant_ptrs_offset as isize));
1867
1868        // This is safe, `kernel_memory_break` is aligned to a word-boundary,
1869        // and `grant_ptrs_offset` is a multiple of the word size.
1870        #[allow(clippy::cast_ptr_alignment)]
1871        // Set all grant pointers to null.
1872        let grant_pointers = slice::from_raw_parts_mut(
1873            kernel_memory_break as *mut GrantPointerEntry,
1874            grant_ptrs_num,
1875        );
1876        for grant_entry in grant_pointers.iter_mut() {
1877            grant_entry.driver_num = 0;
1878            grant_entry.grant_ptr = ptr::null_mut();
1879        }
1880
1881        // Now that we know we have the space we can set up the memory for the
1882        // upcalls.
1883        kernel_memory_break = kernel_memory_break.offset(-(Self::CALLBACKS_OFFSET as isize));
1884
1885        // This is safe today, as MPU constraints ensure that `memory_start`
1886        // will always be aligned on at least a word boundary, and that
1887        // `memory_size` will be aligned on at least a word boundary, and
1888        // `grant_ptrs_offset` is a multiple of the word size. Thus,
1889        // `kernel_memory_break` must be word aligned. While this is unlikely to
1890        // change, it should be more proactively enforced.
1891        //
1892        // TODO: https://github.com/tock/tock/issues/1739
1893        #[allow(clippy::cast_ptr_alignment)]
1894        // Set up ring buffer for upcalls to the process.
1895        let upcall_buf =
1896            slice::from_raw_parts_mut(kernel_memory_break as *mut Task, Self::CALLBACK_LEN);
1897        let tasks = RingBuffer::new(upcall_buf);
1898
1899        // The last thing in the kernel region of process RAM is the process struct.
1900        kernel_memory_break = kernel_memory_break.offset(-(Self::PROCESS_STRUCT_OFFSET as isize));
1901        let process_struct_memory_location = kernel_memory_break;
1902
1903        // Create the Process struct in the app grant region.
1904        // Note that this requires every field be explicitly initialized, as
1905        // we are just transforming a pointer into a structure.
1906        let process: &mut ProcessStandard<C, D> =
1907            &mut *(process_struct_memory_location as *mut ProcessStandard<'static, C, D>);
1908
1909        // Ask the kernel for a unique identifier for this process that is being
1910        // created.
1911        let unique_identifier = kernel.create_process_identifier();
1912
1913        // Save copies of these for later debugging, in case the app was
1914        // compiled for fixed addresses.
1915        let fixed_address_flash = pb.header.get_fixed_address_flash();
1916        let fixed_address_ram = pb.header.get_fixed_address_ram();
1917
1918        process
1919            .process_id
1920            .set(ProcessId::new(kernel, unique_identifier, index));
1921        process.app_id = app_id;
1922        process.kernel = kernel;
1923        process.chip = chip;
1924        process.allow_high_water_mark = Cell::new(initial_allow_high_water_mark);
1925        process.memory_start = allocated_memory_start;
1926        process.memory_len = allocated_memory_len;
1927        process.header = pb.header;
1928        process.kernel_memory_break = Cell::new(kernel_memory_break);
1929        process.app_break = Cell::new(initial_app_brk);
1930        process.grant_pointers = MapCell::new(grant_pointers);
1931
1932        process.credential = pb.credential.get();
1933        process.footers = pb.footers;
1934        process.flash = pb.flash;
1935
1936        process.stored_state = MapCell::new(Default::default());
1937        // Mark this process as approved and leave it to the kernel to start it.
1938        process.state = Cell::new(State::Yielded);
1939        process.fault_policy = fault_policy;
1940        process.restart_count = Cell::new(0);
1941        process.completion_code = OptionalCell::empty();
1942
1943        process.mpu_config = MapCell::new(mpu_config);
1944        process.mpu_regions = [
1945            Cell::new(None),
1946            Cell::new(None),
1947            Cell::new(None),
1948            Cell::new(None),
1949            Cell::new(None),
1950            Cell::new(None),
1951        ];
1952        process.tasks = MapCell::new(tasks);
1953
1954        process.debug = D::default();
1955        if let Some(fix_addr_flash) = fixed_address_flash {
1956            process.debug.set_fixed_address_flash(fix_addr_flash);
1957        }
1958        if let Some(fix_addr_ram) = fixed_address_ram {
1959            process.debug.set_fixed_address_ram(fix_addr_ram);
1960        }
1961
1962        // Handle any architecture-specific requirements for a new process.
1963        //
1964        // NOTE! We have to ensure that the start of process-accessible memory
1965        // (`app_memory_start`) is word-aligned. Since we currently start
1966        // process-accessible memory at the beginning of the allocated memory
1967        // region, we trust the MPU to give us a word-aligned starting address.
1968        //
1969        // TODO: https://github.com/tock/tock/issues/1739
1970        match process.stored_state.map(|stored_state| {
1971            chip.userspace_kernel_boundary().initialize_process(
1972                app_accessible_memory.as_ptr(),
1973                initial_app_brk,
1974                stored_state,
1975            )
1976        }) {
1977            Some(Ok(())) => {}
1978            _ => {
1979                if config::CONFIG.debug_load_processes {
1980                    debug!(
1981                        "[!] flash={:#010X}-{:#010X} process={:?} - couldn't initialize process",
1982                        pb.flash.as_ptr() as usize,
1983                        pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1984                        process_name
1985                    );
1986                }
1987                // Note that since `remaining_memory` was split by `split_at_mut` into
1988                // application memory and unused_memory, a failure here will leak
1989                // the application memory. Not leaking it requires being able to
1990                // reconstitute the original memory slice.
1991                return Err((ProcessLoadError::InternalError, unused_memory));
1992            }
1993        }
1994
1995        let flash_start = process.flash.as_ptr();
1996        let app_start =
1997            flash_start.wrapping_add(process.header.get_app_start_offset() as usize) as usize;
1998        let init_addr =
1999            flash_start.wrapping_add(process.header.get_init_function_offset() as usize) as usize;
2000        let fn_base = flash_start as usize;
2001        let fn_len = process.flash.len();
2002
2003        // We need to construct a capability with sufficient authority to cover all of a user's
2004        // code, with permissions to execute it. The entirety of flash is sufficient.
2005
2006        let init_fn = CapabilityPtr::new_with_authority(
2007            init_addr as *const (),
2008            fn_base,
2009            fn_len,
2010            CapabilityPtrPermissions::Execute,
2011        );
2012
2013        process.tasks.map(|tasks| {
2014            tasks.enqueue(Task::FunctionCall(FunctionCall {
2015                source: FunctionCallSource::Kernel,
2016                pc: init_fn,
2017                argument0: app_start,
2018                argument1: process.memory_start as usize,
2019                argument2: process.memory_len,
2020                argument3: (process.app_break.get() as usize).into(),
2021            }));
2022        });
2023
2024        // Set storage permissions. Put this at the end so that `process` is
2025        // completely formed before using it to determine the storage
2026        // permissions.
2027        process.storage_permissions = storage_permissions_policy.get_permissions(process);
2028
2029        // Return the process object and the remaining unused memory slice.
2030        Ok((Some(process), unused_memory))
2031    }
2032
2033    /// Reset the process, resetting all of its state and re-initializing it so
2034    /// it can start running. Assumes the process is not running but is still in
2035    /// flash and still has its memory region allocated to it.
2036    fn reset(&self) -> Result<(), ErrorCode> {
2037        // We need a new process identifier for this process since the restarted
2038        // version is in effect a new process. This is also necessary to
2039        // invalidate any stored `ProcessId`s that point to the old version of
2040        // the process. However, the process has not moved locations in the
2041        // processes array, so we copy the existing index.
2042        let old_index = self.process_id.get().index;
2043        let new_identifier = self.kernel.create_process_identifier();
2044        self.process_id
2045            .set(ProcessId::new(self.kernel, new_identifier, old_index));
2046
2047        // Reset debug information that is per-execution and not per-process.
2048        self.debug.reset_last_syscall();
2049        self.debug.reset_syscall_count();
2050        self.debug.reset_dropped_upcall_count();
2051        self.debug.reset_timeslice_expiration_count();
2052
2053        // Reset MPU region configuration.
2054        //
2055        // TODO: ideally, this would be moved into a helper function used by
2056        // both create() and reset(), but process load debugging complicates
2057        // this. We just want to create a new config with only flash and memory
2058        // regions.
2059        //
2060        // We must have a previous MPU configuration stored; return an error
2061        // if this invariant is violated. We avoid allocating
2062        // a new MPU configuration, as this may eventually exhaust the
2063        // number of available MPU configurations.
2064        let mut mpu_config = self.mpu_config.take().ok_or(ErrorCode::FAIL)?;
2065        self.chip.mpu().reset_config(&mut mpu_config);
2066
2067        // Allocate MPU region for flash.
2068        let app_mpu_flash = self.chip.mpu().allocate_region(
2069            self.flash.as_ptr(),
2070            self.flash.len(),
2071            self.flash.len(),
2072            mpu::Permissions::ReadExecuteOnly,
2073            &mut mpu_config,
2074        );
2075        if app_mpu_flash.is_none() {
2076            // We were unable to allocate an MPU region for flash. This is very
2077            // unexpected since we previously ran this process. However, we
2078            // return now and leave the process faulted and it will not be
2079            // scheduled.
2080            return Err(ErrorCode::FAIL);
2081        }
2082
2083        // RAM
2084
2085        // Re-determine the minimum amount of RAM the kernel must allocate to
2086        // the process based on the specific requirements of the syscall
2087        // implementation.
2088        let min_process_memory_size = self
2089            .chip
2090            .userspace_kernel_boundary()
2091            .initial_process_app_brk_size();
2092
2093        // Recalculate `initial_kernel_memory_size` as was done in `create()`.
2094        let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
2095        let grant_ptrs_num = self.kernel.get_grant_count_and_finalize();
2096        let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
2097
2098        let initial_kernel_memory_size =
2099            grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET;
2100
2101        let app_mpu_mem = self.chip.mpu().allocate_app_memory_region(
2102            self.mem_start(),
2103            self.memory_len,
2104            self.memory_len, // We want exactly as much memory as we had before the restart.
2105            min_process_memory_size,
2106            initial_kernel_memory_size,
2107            mpu::Permissions::ReadWriteOnly,
2108            &mut mpu_config,
2109        );
2110        let (app_mpu_mem_start, app_mpu_mem_len) = match app_mpu_mem {
2111            Some((start, len)) => (start, len),
2112            None => {
2113                // We couldn't configure the MPU for the process. This shouldn't
2114                // happen since we were able to start the process before, but at
2115                // this point it is better to leave the app faulted and not
2116                // schedule it.
2117                return Err(ErrorCode::NOMEM);
2118            }
2119        };
2120
2121        // Reset memory pointers now that we know the layout of the process
2122        // memory and know that we can configure the MPU.
2123
2124        // `app_brk` is set `min_process_memory_size` bytes above the start of
2125        // memory, matching the syscall implementation's requirement.
2126        let app_brk = app_mpu_mem_start.wrapping_add(min_process_memory_size);
2127        self.app_break.set(app_brk);
2128        // `kernel_brk` is calculated backwards from the end of memory by the
2129        // size of the initial kernel data structures.
2130        let kernel_brk = app_mpu_mem_start
2131            .wrapping_add(app_mpu_mem_len)
2132            .wrapping_sub(initial_kernel_memory_size);
2133        self.kernel_memory_break.set(kernel_brk);
2134        // High water mark for `allow`ed memory is reset to the start of the
2135        // process's memory region.
2136        self.allow_high_water_mark.set(app_mpu_mem_start);
2137
2138        // Store the adjusted MPU configuration:
2139        self.mpu_config.replace(mpu_config);
2140
2141        // Handle any architecture-specific requirements for a process when it
2142        // first starts (as it would when it is new).
2143        let ukb_init_process = self.stored_state.map_or(Err(()), |stored_state| unsafe {
2144            self.chip.userspace_kernel_boundary().initialize_process(
2145                app_mpu_mem_start,
2146                app_brk,
2147                stored_state,
2148            )
2149        });
2150        match ukb_init_process {
2151            Ok(()) => {}
2152            Err(()) => {
2153                // We couldn't initialize the architecture-specific state for
2154                // this process. This shouldn't happen since the app was able to
2155                // be started before, but at this point the app is no longer
2156                // valid. The best thing we can do now is leave the app as still
2157                // faulted and not schedule it.
2158                return Err(ErrorCode::RESERVE);
2159            }
2160        }
2161
2162        self.restart_count.increment();
2163
2164        // Mark the state as `Yielded` for the scheduler.
2165        self.state.set(State::Yielded);
2166
2167        // And queue up this app to be restarted.
2168        let flash_start = self.flash_start();
2169        let app_start =
2170            flash_start.wrapping_add(self.header.get_app_start_offset() as usize) as usize;
2171        let init_addr =
2172            flash_start.wrapping_add(self.header.get_init_function_offset() as usize) as usize;
2173
2174        // We need to construct a capability with sufficient authority to cover all of a user's
2175        // code, with permissions to execute it. The entirety of flash is sufficient.
2176
2177        let init_fn = unsafe {
2178            CapabilityPtr::new_with_authority(
2179                init_addr as *const (),
2180                flash_start as usize,
2181                (self.flash_end() as usize) - (flash_start as usize),
2182                CapabilityPtrPermissions::Execute,
2183            )
2184        };
2185
2186        self.enqueue_task(Task::FunctionCall(FunctionCall {
2187            source: FunctionCallSource::Kernel,
2188            pc: init_fn,
2189            argument0: app_start,
2190            argument1: self.memory_start as usize,
2191            argument2: self.memory_len,
2192            argument3: (self.app_break.get() as usize).into(),
2193        }))
2194    }
2195
2196    /// Checks if the buffer represented by the passed in base pointer and size
2197    /// is within the RAM bounds currently exposed to the process (i.e. ending
2198    /// at `app_break`). If this method returns `true`, the buffer is guaranteed
2199    /// to be accessible to the process and to not overlap with the grant
2200    /// region.
2201    fn in_app_owned_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2202        // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2203        // could skip this check.
2204        // CapabilityPtr needs to make it slightly further, and we need to add
2205        // interfaces that tell us how much assurance it gives on the current
2206        // platform.
2207        let buf_end_addr = buf_start_addr.wrapping_add(size);
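        // Using `wrapping_add` means a `size` large enough to overflow the
        // address space wraps `buf_end_addr` below `buf_start_addr`, so the
        // first check below fails rather than panicking on overflow.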
2208
2209        buf_end_addr >= buf_start_addr
2210            && buf_start_addr >= self.mem_start()
2211            && buf_end_addr <= self.app_break.get()
2212    }
2213
2214    /// Checks if the buffer represented by the passed in base pointer and size
2215    /// is within the readable region of an application's flash memory. If
2216    /// this method returns `true`, the buffer is guaranteed to be readable by
2217    /// the process.
2218    fn in_app_flash_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2219        // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2220        // could skip this check.
2221        // CapabilityPtr needs to make it slightly further, and we need to add
2222        // interfaces that tell us how much assurance it gives on the current
2223        // platform.
2224        let buf_end_addr = buf_start_addr.wrapping_add(size);
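        // As above, `wrapping_add` turns pointer overflow into a failed
        // bounds check rather than a panic.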
2225
2226        buf_end_addr >= buf_start_addr
2227            && buf_start_addr >= self.flash_non_protected_start()
2228            && buf_end_addr <= self.flash_end()
2229    }
2230
2231    /// Reset all `grant_ptr`s to NULL.
2232    unsafe fn grant_ptrs_reset(&self) {
2233        self.grant_pointers.map(|grant_pointers| {
2234            for grant_entry in grant_pointers.iter_mut() {
2235                grant_entry.driver_num = 0;
2236                grant_entry.grant_ptr = ptr::null_mut();
2237            }
2238        });
2239    }
2240
2241    /// Allocate memory in a process's grant region.
2242    ///
2243    /// Ensures that the allocation is of `size` bytes and aligned to `align`
2244    /// bytes.
2245    ///
2246    /// If there is not enough memory, or the MPU cannot isolate the process
2247    /// accessible region from the new kernel memory break after doing the
2248    /// allocation, then this will return `None`.
2249    fn allocate_in_grant_region_internal(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
2250        self.mpu_config.and_then(|config| {
2251            // First, compute the candidate new pointer. Note that at this point
2252            // we have not yet checked whether there is space for this
2253            // allocation or that it meets alignment requirements.
2254            let new_break_unaligned = self.kernel_memory_break.get().wrapping_sub(size);
2255
2256            // Our minimum alignment requirement is two bytes, so that the
2257            // lowest bit of the address will always be zero and we can use it
2258            // as a flag. It doesn't hurt to increase the alignment (except for
2259            // potentially a wasted byte) so we make sure `align` is at least
2260            // two.
2261            let align = cmp::max(align, 2);
2262
2263            // The alignment must be a power of two, 2^a. The expression
2264            // `!(align - 1)` then returns a mask with leading ones, followed by
2265            // `a` trailing zeros.
2266            let alignment_mask = !(align - 1);
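            // E.g. for `align` = 8: `align - 1` = 0b0111, so the mask is
            // ...1111_1000 and the AND below rounds the break down to an
            // 8-byte boundary.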
2267            let new_break = (new_break_unaligned as usize & alignment_mask) as *const u8;
2268
2269            // Verify there is space for this allocation
2270            if new_break < self.app_break.get() {
2271                None
2272                // Verify it didn't wrap around
2273            } else if new_break > self.kernel_memory_break.get() {
2274                None
2275                // Verify this is compatible with the MPU.
2276            } else if let Err(()) = self.chip.mpu().update_app_memory_region(
2277                self.app_break.get(),
2278                new_break,
2279                mpu::Permissions::ReadWriteOnly,
2280                config,
2281            ) {
2282                None
2283            } else {
2284                // Allocation is valid.
2285
2286                // We always allocate down, so we must lower the
2287                // kernel_memory_break.
2288                self.kernel_memory_break.set(new_break);
2289
2290                // We need `grant_ptr` as a mutable pointer.
2291                let grant_ptr = new_break as *mut u8;
2292
2293                // ### Safety
2294                //
2295                // Here we are guaranteeing that `grant_ptr` is not null. We can
2296                // ensure this because we just created `grant_ptr` based on the
2297                // process's allocated memory, and we know it cannot be null.
2298                unsafe { Some(NonNull::new_unchecked(grant_ptr)) }
2299            }
2300        })
2301    }
2302
2303    /// Create the identifier for a custom grant that `grant.rs` uses to
2304    /// access the custom grant.
2305    ///
2306    /// We create this identifier by calculating the number of bytes between
2307    /// where the custom grant starts and the end of the process memory.
2308    fn create_custom_grant_identifier(&self, ptr: NonNull<u8>) -> ProcessCustomGrantIdentifier {
2309        let custom_grant_address = ptr.as_ptr() as usize;
2310        let process_memory_end = self.mem_end() as usize;
2311
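        // E.g. if process memory ends at 0x20008000 and the custom grant
        // starts at 0x20007f00, the stored offset is 0x100;
        // `get_custom_grant_address()` later recovers 0x20008000 - 0x100.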
2312        ProcessCustomGrantIdentifier {
2313            offset: process_memory_end - custom_grant_address,
2314        }
2315    }
2316
2317    /// Use a `ProcessCustomGrantIdentifier` to find the address of the
2318    /// custom grant.
2319    ///
2320    /// This reverses `create_custom_grant_identifier()`.
2321    fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifier) -> usize {
2322        let process_memory_end = self.mem_end() as usize;
2323
2324        // Subtract the offset in the identifier from the end of the process
2325        // memory to get the address of the custom grant.
2326        process_memory_end - identifier.offset
2327    }
2328
2329    /// Return the app's read and modify storage permissions from the TBF header
2330    /// if it exists.
2331    ///
2332    /// If the header does not exist then return `None`. If the header does
2333    /// exist, this returns a 5-tuple with:
2334    ///
2335    /// - `write_allowed`: bool. If this process should have write permissions.
2336    /// - `read_count`: usize. How many read IDs are valid.
2337    /// - `read_ids`: [u32]. The read IDs.
2338    /// - `modify_count`: usize. How many modify IDs are valid.
2339    /// - `modify_ids`: [u32]. The modify IDs.
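    ///
    /// A hypothetical usage sketch (`storage_id` is an illustrative value,
    /// not part of this API):
    ///
    /// ```ignore
    /// if let Some((write_allowed, read_count, read_ids, _, _)) =
    ///     process.get_tbf_storage_permissions()
    /// {
    ///     // Only the first `read_count` entries of `read_ids` are valid.
    ///     let can_read = read_ids[..read_count].contains(&storage_id);
    ///     let _ = (write_allowed, can_read);
    /// }
    /// ```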
2340    pub fn get_tbf_storage_permissions(&self) -> Option<(bool, usize, [u32; 8], usize, [u32; 8])> {
2341        let read_perms = self.header.get_storage_read_ids();
2342        let modify_perms = self.header.get_storage_modify_ids();
2343
2344        match (read_perms, modify_perms) {
2345            (Some((read_count, read_ids)), Some((modify_count, modify_ids))) => Some((
2346                self.header.get_storage_write_id().is_some(),
2347                read_count,
2348                read_ids,
2349                modify_count,
2350                modify_ids,
2351            )),
2352            _ => None,
2353        }
2354    }
2355
2356    /// The start address of allocated RAM for this process.
2357    fn mem_start(&self) -> *const u8 {
2358        self.memory_start
2359    }
2360
2361    /// The first address after the end of the allocated RAM for this process.
2362    fn mem_end(&self) -> *const u8 {
2363        self.memory_start.wrapping_add(self.memory_len)
2364    }
2365
2366    /// The start address of the flash region allocated for this process.
2367    fn flash_start(&self) -> *const u8 {
2368        self.flash.as_ptr()
2369    }
2370
2371    /// Get the first address of process's flash that isn't protected by the
2372    /// kernel. The protected range of flash contains the TBF header and
2373    /// potentially other state the kernel is storing on behalf of the process,
2374    /// and cannot be edited by the process.
2375    fn flash_non_protected_start(&self) -> *const u8 {
2376        ((self.flash.as_ptr() as usize) + self.header.get_protected_size() as usize) as *const u8
2377    }
2378
2379    /// The first address after the end of the flash region allocated for this
2380    /// process.
2381    fn flash_end(&self) -> *const u8 {
2382        self.flash.as_ptr().wrapping_add(self.flash.len())
2383    }
2384
2385    /// The lowest address of the grant region for the process.
2386    fn kernel_memory_break(&self) -> *const u8 {
2387        self.kernel_memory_break.get()
2388    }
2389
2390    /// Return the highest address the process has access to, i.e. the
2391    /// current process memory break (`app_break`).
2392    fn app_memory_break(&self) -> *const u8 {
2393        self.app_break.get()
2394    }
2395}