kernel/kernel.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Tock's main kernel loop, scheduler loop, and Scheduler trait.
6//!
7//! This module also includes utility functions that are commonly used by
8//! scheduler policy implementations. Scheduling policy (round robin, priority,
9//! etc.) is defined in the `scheduler` subcrate and selected by a board.
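//!
//! As a hedged sketch (not taken from a real board), a board names its
//! scheduling policy in its `KernelResources` implementation; the platform
//! struct, chip type, and scheduler field below are illustrative:
//!
//! ```rust,ignore
//! impl KernelResources<SomeChip> for Platform {
//!     type Scheduler = RoundRobinSched<'static>;
//!     fn scheduler(&self) -> &Self::Scheduler {
//!         self.scheduler
//!     }
//!     // ...the remaining associated types and accessors (syscall driver
//!     // lookup, syscall filter, scheduler timer, watchdog, etc.) are
//!     // elided in this sketch...
//! }
//! ```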
10
11use core::cell::Cell;
12use core::num::NonZeroU32;
13
14use crate::capabilities;
15use crate::config;
16use crate::debug;
17use crate::deferred_call::DeferredCall;
18use crate::errorcode::ErrorCode;
19use crate::grant::{AllowRoSize, AllowRwSize, Grant, UpcallSize};
20use crate::ipc;
21use crate::memop;
22use crate::platform::chip::Chip;
23use crate::platform::mpu::MPU;
24use crate::platform::platform::ContextSwitchCallback;
25use crate::platform::platform::KernelResources;
26use crate::platform::platform::{ProcessFault, SyscallDriverLookup, SyscallFilter};
27use crate::platform::scheduler_timer::SchedulerTimer;
28use crate::platform::watchdog::WatchDog;
29use crate::process::ProcessSlot;
30use crate::process::{self, ProcessId, Task};
31use crate::scheduler::{Scheduler, SchedulingDecision};
32use crate::syscall::SyscallDriver;
33use crate::syscall::{ContextSwitchReason, SyscallReturn};
34use crate::syscall::{Syscall, YieldCall};
35use crate::syscall_driver::CommandReturn;
36use crate::upcall::{Upcall, UpcallId};
37use crate::utilities::cells::NumericCellExt;
38
39/// Threshold in microseconds to consider a process's timeslice to be exhausted.
40/// That is, Tock will skip re-scheduling a process if its remaining timeslice
41/// is less than this threshold.
42pub(crate) const MIN_QUANTA_THRESHOLD_US: u32 = 500;
43
44/// Main object for the kernel. Each board will need to create one.
45pub struct Kernel {
46 /// This holds a reference to the static array of process slots.
47 processes: &'static [ProcessSlot],
48
49 /// A counter which keeps track of how many process identifiers have been
50 /// created. This is used to create new unique identifiers for processes.
51 process_identifier_max: Cell<usize>,
52
53 /// How many grant regions have been setup. This is incremented on every
54 /// call to `create_grant()`. We need to explicitly track this so that when
55 /// processes are created they can be allocated pointers for each grant.
56 grant_counter: Cell<usize>,
57
58 /// Flag to mark that grants have been finalized. This means that the kernel
59 /// cannot support creating new grants because processes have already been
60 /// created and the data structures for grants have already been
61 /// established.
62 grants_finalized: Cell<bool>,
63}
64
65/// Represents the different outcomes when trying to allocate a grant region.
66enum AllocResult {
67 NoAllocation,
68 NewAllocation,
69 SameAllocation,
70}
71
72/// Tries to allocate the grant region for the specified driver and process.
73/// Returns whether a new grant was allocated or not.
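///
/// For context, a capsule's `allocate_grant()` hook is commonly just an
/// `enter()` on its grant that forces the grant region to be allocated. A
/// minimal sketch, assuming a capsule whose grant field is named `apps`:
///
/// ```rust,ignore
/// fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
///     // Entering the grant allocates it for this process if needed.
///     self.apps.enter(processid, |_, _| {})
/// }
/// ```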
74fn try_allocate_grant(driver: &dyn SyscallDriver, process: &dyn process::Process) -> AllocResult {
75 let before_count = process.grant_allocated_count().unwrap_or(0);
76 match driver.allocate_grant(process.processid()).is_ok() {
77 true if before_count == process.grant_allocated_count().unwrap_or(0) => {
78 AllocResult::SameAllocation
79 }
80 true => AllocResult::NewAllocation,
81 false => AllocResult::NoAllocation,
82 }
83}
84
85impl Kernel {
86 /// Create the kernel object that knows about the list of processes.
87 ///
88 /// Crucially, the processes included in the `processes` array MUST be valid
89 /// to execute. Any credential checks or validation MUST happen before the
90 /// `Process` object is included in this array.
91 pub const fn new(processes: &'static [ProcessSlot]) -> Kernel {
92 Kernel {
93 processes,
94 process_identifier_max: Cell::new(0),
95 grant_counter: Cell::new(0),
96 grants_finalized: Cell::new(false),
97 }
98 }
99
100 /// Helper function that moves all non-generic portions of process_map_or
101 /// into a non-generic function to reduce code bloat from monomorphization.
102 pub(crate) fn get_process(&self, processid: ProcessId) -> Option<&dyn process::Process> {
103 // We use the index in the [`ProcessId`] so we can do a direct lookup.
104 // However, we are not guaranteed that the app still exists at that
105 // index in the processes array. To avoid additional overhead, we do the
106 // lookup and check here, rather than calling `.index()`.
107 self.processes
108 .get(processid.index)
109 .and_then(|pslot| pslot.get())
110 // Check that the process stored here matches the
111 // identifier in the `processid`.
112 .filter(|process| process.processid() == processid)
113 }
114
115 /// Run a closure on a specific process if it exists. If the process with a
116 /// matching [`ProcessId`] does not exist at the index specified within the
117 /// [`ProcessId`], then `default` will be returned.
118 ///
119 /// A match will not be found if the process was removed (and there is a
120 /// `None` in the process array), if the process changed its identifier
121 /// (likely after being restarted), or if the process was moved to a
122 /// different index in the processes array. Note that a match _will_ be
123 /// found if the process still exists in the correct location in the array
124 /// but is in any "stopped" state.
125 pub(crate) fn process_map_or<F, R>(&self, default: R, processid: ProcessId, closure: F) -> R
126 where
127 F: FnOnce(&dyn process::Process) -> R,
128 {
129 match self.get_process(processid) {
130 Some(process) => closure(process),
131 None => default,
132 }
133 }
134
135 /// Run a closure on a specific process if it exists. If the process with a
136 /// matching `ProcessId` does not exist at the index specified within the
137 /// `ProcessId`, then `default` will be returned.
138 ///
139 /// A match will not be found if the process was removed (and there is a
140 /// `None` in the process array), if the process changed its identifier
141 /// (likely after being restarted), or if the process was moved to a
142 /// different index in the processes array. Note that a match _will_ be
143 /// found if the process still exists in the correct location in the array
144 /// but is in any "stopped" state.
145 ///
146 /// This is functionally the same as `process_map_or()`, but this method is
147 /// available outside the kernel crate and requires a
148 /// `ProcessManagementCapability` to use.
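    ///
    /// A minimal usage sketch from trusted board or management code (the
    /// `kernel`, `processid`, and capability bindings are assumed to exist):
    ///
    /// ```rust,ignore
    /// let name = kernel.process_map_or_external(
    ///     "<no such process>",
    ///     processid,
    ///     |process| process.get_process_name(),
    ///     &process_management_capability,
    /// );
    /// ```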
149 pub fn process_map_or_external<F, R>(
150 &self,
151 default: R,
152 processid: ProcessId,
153 closure: F,
154 _capability: &dyn capabilities::ProcessManagementCapability,
155 ) -> R
156 where
157 F: FnOnce(&dyn process::Process) -> R,
158 {
159 match self.get_process(processid) {
160 Some(process) => closure(process),
161 None => default,
162 }
163 }
164
165 /// Run a closure on every valid process. This will iterate the array of
166 /// processes and call the closure on every process that exists.
167 pub(crate) fn process_each<F>(&self, closure: F)
168 where
169 F: FnMut(&dyn process::Process),
170 {
171 self.get_process_iter().for_each(closure);
172 }
173
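    /// Returns an iterator over all processes loaded by the kernel.
    ///
    /// This is functionally the same as `get_process_iter()`, but this method
    /// is available outside the kernel crate and requires a
    /// `ProcessManagementCapability` to use.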
174 pub fn process_iter_capability(
175 &self,
176 _capability: &dyn capabilities::ProcessManagementCapability,
177 ) -> impl Iterator<Item = &dyn process::Process> {
178 self.get_process_iter()
179 }
180
181 /// Returns an iterator over all processes loaded by the kernel.
182 pub(crate) fn get_process_iter(
183 &self,
184 ) -> core::iter::FilterMap<
185 core::slice::Iter<'_, ProcessSlot>,
186 fn(&ProcessSlot) -> Option<&'static dyn process::Process>,
187 > {
188 self.processes.iter().filter_map(ProcessSlot::get)
189 }
190
191 /// Run a closure on every valid process. This will iterate the array of
192 /// processes and call the closure on every process that exists.
193 ///
194 /// This is functionally the same as `process_each()`, but this method is
195 /// available outside the kernel crate and requires a
196 /// `ProcessManagementCapability` to use.
197 pub fn process_each_capability<F>(
198 &'static self,
199 _capability: &dyn capabilities::ProcessManagementCapability,
200 closure: F,
201 ) where
202 F: FnMut(&dyn process::Process),
203 {
204 self.process_each(closure);
205 }
206
207 /// Run a closure on every process, but only continue if the closure returns
208 /// `None`. That is, if the closure returns any non-`None` value, iteration
209 /// stops and the value is returned from this function to the caller.
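    ///
    /// A minimal kernel-internal sketch (`kernel` is a `&Kernel`; the process
    /// name is illustrative):
    ///
    /// ```rust,ignore
    /// let blink_id = kernel.process_until(|process| {
    ///     if process.get_process_name() == "blink" {
    ///         Some(process.processid())
    ///     } else {
    ///         None
    ///     }
    /// });
    /// ```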
210 pub(crate) fn process_until<T, F>(&self, closure: F) -> Option<T>
211 where
212 F: Fn(&dyn process::Process) -> Option<T>,
213 {
214 for process in self.get_process_iter() {
215 let ret = closure(process);
216 if ret.is_some() {
217 return ret;
218 }
219 }
220 None
221 }
222
223 /// Checks if the provided [`ProcessId`] is still valid given the processes
224 /// stored in the processes array. Returns `true` if the ProcessId still
225 /// refers to a valid process, and `false` if not.
226 ///
227 /// This is needed for `ProcessId` itself to implement the `.index()`
228 /// command to verify that the referenced app is still at the correct index.
229 pub(crate) fn processid_is_valid(&self, processid: &ProcessId) -> bool {
230 self.processes
231 .get(processid.index)
232 .is_some_and(|p| p.contains_process_with_id(processid.id()))
233 }
234
235 /// Create a new grant. This is used in board initialization to setup grants
236 /// that capsules use to interact with processes.
237 ///
238 /// Grants **must** only be created _before_ processes are initialized.
239 /// Processes use the number of grants that have been allocated to correctly
240 /// initialize the process's memory with a pointer for each grant. If a
241 /// grant is created after processes are initialized this will panic.
242 ///
243 /// Calling this function is restricted to only certain users, and to
244 /// enforce this calling this function requires the
245 /// `MemoryAllocationCapability` capability.
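    ///
    /// A hedged sketch of typical board-setup usage (the capsule and driver
    /// number constant are illustrative; the grant's type parameters are
    /// inferred from where the grant is ultimately used):
    ///
    /// ```rust,ignore
    /// let memory_allocation_capability =
    ///     create_capability!(capabilities::MemoryAllocationCapability);
    /// let console_grant = board_kernel.create_grant(
    ///     capsules_core::console::DRIVER_NUM,
    ///     &memory_allocation_capability,
    /// );
    /// ```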
246 pub fn create_grant<
247 T: Default,
248 Upcalls: UpcallSize,
249 AllowROs: AllowRoSize,
250 AllowRWs: AllowRwSize,
251 >(
252 &'static self,
253 driver_num: usize,
254 _capability: &dyn capabilities::MemoryAllocationCapability,
255 ) -> Grant<T, Upcalls, AllowROs, AllowRWs> {
256 if self.grants_finalized.get() {
257 panic!("Grants finalized. Cannot create a new grant.");
258 }
259
260 // Create and return a new grant.
261 let grant_index = self.grant_counter.get();
262 self.grant_counter.increment();
263 Grant::new(self, driver_num, grant_index)
264 }
265
266 /// Returns the number of grants that have been setup in the system and
267 /// marks the grants as "finalized". This means that no more grants can
268 /// be created because data structures have been setup based on the number
269 /// of grants when this function is called.
270 ///
271 /// In practice, this is called when processes are created, and the process
272 /// memory is setup based on the number of current grants.
273 pub(crate) fn get_grant_count_and_finalize(&self) -> usize {
274 self.grants_finalized.set(true);
275 self.grant_counter.get()
276 }
277
278 /// Returns the number of grants that have been setup in the system and
279 /// marks the grants as "finalized". This means that no more grants can
280 /// be created because data structures have been setup based on the number
281 /// of grants when this function is called.
282 ///
283 /// In practice, this is called when processes are created, and the process
284 /// memory is setup based on the number of current grants.
285 ///
286 /// This is exposed publicly, but restricted with a capability. The intent
287 /// is that external implementations of `Process` need to be able to
288 /// retrieve the final number of grants.
289 pub fn get_grant_count_and_finalize_external(
290 &self,
291 _capability: &dyn capabilities::ExternalProcessCapability,
292 ) -> usize {
293 self.get_grant_count_and_finalize()
294 }
295
296 /// Create a new unique identifier for a process and return the identifier.
297 ///
298 /// Typically we just choose a larger number than we have used for any
299 /// process before which ensures that the identifier is unique.
300 pub(crate) fn create_process_identifier(&self) -> usize {
301 self.process_identifier_max.get_and_increment()
302 }
303
304 /// Find the next slot in the processes array that is available for
305 /// storing a new process.
306 ///
307 /// Returns `Err(())` if there are no available slots.
308 pub(crate) fn next_available_process_slot(&self) -> Result<(usize, &ProcessSlot), ()> {
309 for (index, slot) in self.processes.iter().enumerate() {
310 if slot.proc.get().is_none() {
311 return Ok((index, slot));
312 }
313 }
314 Err(())
315 }
316
317 /// Cause all apps to fault.
318 ///
319 /// This will call `set_fault_state()` on each app, causing the app to enter
320 /// the state as if it had crashed (for example with an MPU violation). If
321 /// the process is configured to be restarted it will be.
322 ///
323 /// Only callers with the `ProcessManagementCapability` can call this
324 /// function. This restricts general capsules from being able to call this
325 /// function, since capsules should not be able to arbitrarily restart all
326 /// apps.
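    ///
    /// A minimal sketch from trusted board or process-management code:
    ///
    /// ```rust,ignore
    /// let process_management_capability =
    ///     create_capability!(capabilities::ProcessManagementCapability);
    /// board_kernel.hardfault_all_apps(&process_management_capability);
    /// ```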
327 pub fn hardfault_all_apps<C: capabilities::ProcessManagementCapability>(&self, _c: &C) {
328 for process in self.get_process_iter() {
329 process.set_fault_state();
330 }
331 }
332
333 /// Perform one iteration of the core Tock kernel loop.
334 ///
335 /// This function is responsible for three main operations:
336 ///
337 /// 1. Check if the kernel itself has any work to be done and if the
338 /// scheduler wants to complete that work now. If so, it allows the
339 /// kernel to run.
340 /// 2. Check if any processes have any work to be done, and if so if the
341 /// scheduler wants to allow any processes to run now, and if so which
342 /// one.
343 /// 3. After ensuring the scheduler does not want to complete any kernel or
344 /// process work (or there is no work to be done), and there are no
345 /// outstanding interrupts to handle, put the chip to sleep.
346 ///
347 /// This function has one configuration option: `no_sleep`. If that argument
348 /// is set to true, the kernel will never attempt to put the chip to sleep,
349 /// and this function can be called again immediately.
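    ///
    /// A hedged sketch of driving the kernel one iteration at a time from a
    /// test harness, never letting the chip sleep (`test_complete()` and the
    /// platform bindings are hypothetical):
    ///
    /// ```rust,ignore
    /// loop {
    ///     board_kernel.kernel_loop_operation(
    ///         &platform,
    ///         chip,
    ///         Some(&platform.ipc),
    ///         true, // no_sleep: keep the chip awake between iterations
    ///         &main_loop_capability,
    ///     );
    ///     if test_complete() {
    ///         break;
    ///     }
    /// }
    /// ```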
350 pub fn kernel_loop_operation<KR: KernelResources<C>, C: Chip, const NUM_PROCS: u8>(
351 &self,
352 resources: &KR,
353 chip: &C,
354 ipc: Option<&ipc::IPC<NUM_PROCS>>,
355 no_sleep: bool,
356 _capability: &dyn capabilities::MainLoopCapability,
357 ) {
358 let scheduler = resources.scheduler();
359
360 resources.watchdog().tickle();
361 unsafe {
362 // Ask the scheduler if we should do tasks inside of the kernel,
363 // such as handle interrupts. A scheduler may want to prioritize
364 // processes instead, or there may be no kernel work to do.
365 match scheduler.do_kernel_work_now(chip) {
366 true => {
367 // Execute kernel work. This includes handling
368 // interrupts and is how code in the chips/ and capsules
369 // crates is able to execute.
370 scheduler.execute_kernel_work(chip);
371 }
372 false => {
373 // No kernel work ready, so ask scheduler for a process.
374 match scheduler.next() {
375 SchedulingDecision::RunProcess((processid, timeslice_us)) => {
376 self.process_map_or((), processid, |process| {
377 let (reason, time_executed) =
378 self.do_process(resources, chip, process, ipc, timeslice_us);
379 scheduler.result(reason, time_executed);
380 });
381 }
382 SchedulingDecision::TrySleep => {
383 // For testing, it may be helpful to
384 // disable sleeping the chip in case
385 // the running test does not generate
386 // any interrupts.
387 if !no_sleep {
388 chip.with_interrupts_disabled(|| {
389 // Cannot sleep if interrupts are pending,
390 // as on most platforms unhandled interrupts
391 // will wake the device. Also, if the only
392 // pending interrupt occurred after the
393 // scheduler decided to put the chip to
394 // sleep, but before this atomic section
395 // starts, the interrupt will not be
396 // serviced and the chip will never wake
397 // from sleep.
398 if !chip.has_pending_interrupts() && !DeferredCall::has_tasks()
399 {
400 resources.watchdog().suspend();
401 chip.sleep();
402 resources.watchdog().resume();
403 }
404 });
405 }
406 }
407 }
408 }
409 }
410 }
411 }
412
413 /// Main loop of the OS.
414 ///
415 /// Most of the behavior of this loop is controlled by the [`Scheduler`]
416 /// implementation in use.
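    ///
    /// A hedged sketch of the tail end of a board's `main()` (platform, chip,
    /// and IPC construction elided; names are illustrative):
    ///
    /// ```rust,ignore
    /// let main_loop_capability = create_capability!(capabilities::MainLoopCapability);
    /// board_kernel.kernel_loop(&platform, chip, Some(&platform.ipc), &main_loop_capability);
    /// ```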
417 pub fn kernel_loop<KR: KernelResources<C>, C: Chip, const NUM_PROCS: u8>(
418 &self,
419 resources: &KR,
420 chip: &C,
421 ipc: Option<&ipc::IPC<NUM_PROCS>>,
422 capability: &dyn capabilities::MainLoopCapability,
423 ) -> ! {
424 resources.watchdog().setup();
425 // Before we begin, verify that deferred calls were soundly setup.
426 DeferredCall::verify_setup();
427 loop {
428 self.kernel_loop_operation(resources, chip, ipc, false, capability);
429 }
430 }
431
432 /// Transfer control from the kernel to a userspace process.
433 ///
434 /// This function is called by the main kernel loop to run userspace code.
435 /// Notably, system calls from processes are handled in the kernel, *by the
436 /// kernel thread* in this function, and the syscall return value is set for
437 /// the process immediately. Normally, a process is allowed to continue
438 /// running after calling a syscall. However, the scheduler is given an out,
439 /// as `do_process()` will check with the scheduler before re-executing the
440 /// process to allow it to return from the syscall. If a process yields with
441 /// no upcalls pending, exits, exceeds its timeslice, or is interrupted,
442 /// then `do_process()` will return.
443 ///
444 /// Depending on the particular scheduler in use, this function may act in a
445 /// few different ways. `scheduler.continue_process()` allows the scheduler
446 /// to tell the Kernel whether to continue executing the process, or to
447 /// return control to the scheduler as soon as a kernel task becomes ready
448 /// (either a bottom half interrupt handler or dynamic deferred call), or to
449 /// continue executing the userspace process until it reaches one of the
450 /// aforementioned stopping conditions. Some schedulers may not require a
451 /// scheduler timer; passing `None` for the timeslice will use a null
452 /// scheduler timer even if the chip provides a real scheduler timer.
453 /// Schedulers can pass a timeslice (in us) of their choice, though if the
454 /// passed timeslice is smaller than `MIN_QUANTA_THRESHOLD_US` the process
455 /// will not execute, and this function will return immediately.
456 ///
457 /// This function returns a tuple indicating the reason this
458 /// function has returned to the scheduler, and the amount of time the
459 /// process spent executing (or `None` if the process was run
460 /// cooperatively). Notably, time spent in this function by the kernel,
461 /// executing system calls or merely setting up the switch to/from
462 /// userspace, is charged to the process.
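    ///
    /// A hedged sketch of how a round-robin style `Scheduler::result()`
    /// implementation might consume the values returned here (the `queue`
    /// field and its methods are illustrative, not a real Tock API):
    ///
    /// ```rust,ignore
    /// fn result(&self, result: StoppedExecutingReason, _execution_time_us: Option<u32>) {
    ///     // Keep a kernel-preempted process at the head of the queue so it
    ///     // resumes next; otherwise rotate it to the back of the queue.
    ///     if result != StoppedExecutingReason::KernelPreemption {
    ///         if let Some(node) = self.queue.pop_front() {
    ///             self.queue.push_back(node);
    ///         }
    ///     }
    /// }
    /// ```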
463 fn do_process<KR: KernelResources<C>, C: Chip, const NUM_PROCS: u8>(
464 &self,
465 resources: &KR,
466 chip: &C,
467 process: &dyn process::Process,
468 ipc: Option<&crate::ipc::IPC<NUM_PROCS>>,
469 timeslice_us: Option<NonZeroU32>,
470 ) -> (process::StoppedExecutingReason, Option<u32>) {
471 // We must use a dummy scheduler timer if the process should be executed
472 // without any timeslice restrictions. Note, a chip may not provide a
473 // real scheduler timer implementation even if a timeslice is requested.
474 let scheduler_timer: &dyn SchedulerTimer = if timeslice_us.is_none() {
475 &() // dummy timer, no preemption
476 } else {
477 resources.scheduler_timer()
478 };
479
480 // Clear the scheduler timer and then start the counter. This starts the
481 // process's timeslice. Since the kernel is still executing at this
482 // point, the scheduler timer need not have an interrupt enabled after
483 // `start()`.
484 scheduler_timer.reset();
485 if let Some(timeslice) = timeslice_us {
486 scheduler_timer.start(timeslice)
487 }
488
489 // Need to track why the process is no longer executing so that we can
490 // inform the scheduler.
491 let mut return_reason = process::StoppedExecutingReason::NoWorkLeft;
492
493 // Since the timeslice counts both the process's execution time and the
494 // time spent in the kernel on behalf of the process (setting it up and
495 // handling its syscalls), we intend to keep running the process until
496 // it has no more work to do. We break out of this loop if the scheduler
497 // no longer wants to execute this process or if it exceeds its
498 // timeslice.
499 loop {
500 let stop_running = match scheduler_timer.get_remaining_us() {
501 Some(us) => us.get() <= MIN_QUANTA_THRESHOLD_US,
502 None => true,
503 };
504 if stop_running {
505 // Process ran out of time while the kernel was executing.
506 process.debug_timeslice_expired();
507 return_reason = process::StoppedExecutingReason::TimesliceExpired;
508 break;
509 }
510
511 // Check if the scheduler wishes to continue running this process.
512 let continue_process = resources
513 .scheduler()
514 .continue_process(process.processid(), chip);
515 if !continue_process {
516 return_reason = process::StoppedExecutingReason::KernelPreemption;
517 break;
518 }
519
520 // Check if this process is actually ready to run. If not, we don't
521 // try to run it. This case can happen if a process faults and is
522 // stopped, for example.
523 if !process.ready() {
524 return_reason = process::StoppedExecutingReason::NoWorkLeft;
525 break;
526 }
527
528 match process.get_state() {
529 process::State::Running => {
530 // Running means that this process expects to be running, so
531 // go ahead and set things up and switch to executing the
532 // process. Arming the scheduler timer instructs it to
533 // generate an interrupt when the timeslice has expired. The
534 // underlying timer is not affected.
535 resources
536 .context_switch_callback()
537 .context_switch_hook(process);
538
539 // Configure the MPU for the process to run, and activate
540 // it. With certain memory protection mechanisms such as
541 // RISC-V ePMP, this will make all userspace-accessible
542 // memory inaccessible to kernel mode. When switching back
543 // to the kernel, we must first run `disable_app_mpu` before
544 // attempting to access any userspace memory.
545 process.setup_mpu();
546 chip.mpu().enable_app_mpu();
547
548 scheduler_timer.arm();
549 let context_switch_reason = process.switch_to();
550 scheduler_timer.disarm();
551
552 // Disable the application MPU. This is necessary on systems
553 // like RISC-V ePMP, which makes userspace memory
554 // inaccessible to kernel mode if memory protection is
555 // active.
556 //
557 // # Safety
558 //
559 // This function is unsafe, as calling it before switching
560 // to a process, without first re-enabling memory
561 // protection, could allow an application to access
562 // kernel-private memory. Invoking this function is safe
563 // here, as we always call `enable_app_mpu` above before
564 // switching back to a process.
565 unsafe {
566 chip.mpu().disable_app_mpu();
567 }
568
569 // Now the process has returned back to the kernel. Check
570 // why and handle the process as appropriate.
571 match context_switch_reason {
572 Some(ContextSwitchReason::Fault) => {
573 // The app faulted, check if the chip wants to
574 // handle the fault.
575 if resources
576 .process_fault()
577 .process_fault_hook(process)
578 .is_err()
579 {
580 // Let process deal with it as appropriate.
581 process.set_fault_state();
582 }
583 }
584 Some(ContextSwitchReason::SyscallFired { syscall }) => {
585 self.handle_syscall(resources, process, syscall);
586 }
587 Some(ContextSwitchReason::Interrupted) => {
588 if scheduler_timer.get_remaining_us().is_none() {
589 // This interrupt was a timeslice expiration.
590 process.debug_timeslice_expired();
591 return_reason = process::StoppedExecutingReason::TimesliceExpired;
592 break;
593 }
594 // Go to the beginning of loop to determine whether
595 // to break to handle the interrupt, continue
596 // executing this process, or switch to another
597 // process.
598 continue;
599 }
600 None => {
601 // Something went wrong when switching to this
602 // process. Indicate this by putting it in a fault
603 // state.
604 process.set_fault_state();
605 }
606 }
607 }
608 process::State::Yielded => {
609 // If the process is yielded or hasn't been started, it is
610 // waiting for an upcall. If there is a task scheduled for
611 // this process go ahead and set the process to execute it.
612 match process.dequeue_task() {
613 None => break,
614 Some(cb) => match cb {
615 Task::ReturnValue(_) => {
616 // Per TRD104, Yield-Wait does not wake the
617 // process for events that generate Null
618 // Upcalls.
619 break;
620 }
621 Task::FunctionCall(ccb) => {
622 if config::CONFIG.trace_syscalls {
623 debug!(
624 "[{:?}] function_call @{:#x}({:#x}, {:#x}, {:#x}, {:#x})",
625 process.processid(),
626 ccb.pc,
627 ccb.argument0,
628 ccb.argument1,
629 ccb.argument2,
630 ccb.argument3,
631 );
632 }
633 process.set_process_function(ccb);
634 }
635 Task::IPC((otherapp, ipc_type)) => {
636 ipc.map_or_else(
637 || {
638 panic!("Kernel consistency error: IPC Task with no IPC");
639 },
640 |ipc| {
641 // TODO(alevy): this could error for a variety of reasons.
642 // Should we communicate the error somehow?
643 // https://github.com/tock/tock/issues/1993
644 unsafe {
645 let _ = ipc.schedule_upcall(
646 process.processid(),
647 otherapp,
648 ipc_type,
649 );
650 }
651 },
652 );
653 }
654 },
655 }
656 }
657 process::State::YieldedFor(upcall_id) => {
658 // If this process is waiting for a specific upcall, see if
659 // it is ready. If so, dequeue it and return its values to
660 // the process without scheduling the callback.
661 match process.remove_upcall(upcall_id) {
662 None => break,
663 Some(task) => {
664 let (a0, a1, a2) = match task {
665 // There is no callback function registered; we
666 // just return the values provided by the driver.
667 Task::ReturnValue(rv) => {
668 if config::CONFIG.trace_syscalls {
669 debug!(
670 "[{:?}] Yield-WaitFor: [NU] ({:#x}, {:#x}, {:#x})",
671 process.processid(),
672 rv.argument0,
673 rv.argument1,
674 rv.argument2,
675 );
676 }
677 (rv.argument0, rv.argument1, rv.argument2)
678 }
679 // There is a registered callback function, but
680 // since the process used `Yield-WaitFor`, we do
681 // not execute it, we just return its argument
682 // values to the application.
683 Task::FunctionCall(ccb) => {
684 if config::CONFIG.trace_syscalls {
685 debug!(
686 "[{:?}] Yield-WaitFor [Suppressed function_call @{:#x}] ({:#x}, {:#x}, {:#x}, {:#x})",
687 process.processid(),
688 ccb.pc,
689 ccb.argument0,
690 ccb.argument1,
691 ccb.argument2,
692 ccb.argument3,
693 );
694 }
695 (ccb.argument0, ccb.argument1, ccb.argument2)
696 }
697 Task::IPC(_) => todo!(),
698 };
699 process
700 .set_syscall_return_value(SyscallReturn::YieldWaitFor(a0, a1, a2));
701 }
702 }
703 }
704 process::State::Faulted | process::State::Terminated => {
705 // We should never be scheduling an unrunnable process.
706 // This is a potential security flaw: panic.
707 panic!("Attempted to schedule an unrunnable process");
708 }
709 process::State::Stopped(_) => {
710 return_reason = process::StoppedExecutingReason::Stopped;
711 break;
712 }
713 }
714 }
715
716 // Check how much time the process used while it was executing, and
717 // return the value so we can provide it to the scheduler.
718 let time_executed_us = timeslice_us.map(|timeslice| {
719 // Note, we cannot call `.get_remaining_us()` again if it has
720 // previously returned `None`, so we _must_ check the return reason
721 // first.
722 if return_reason == process::StoppedExecutingReason::TimesliceExpired {
723 // used the whole timeslice
724 timeslice.get()
725 } else {
726 match scheduler_timer.get_remaining_us() {
727 Some(remaining) => timeslice.get() - remaining.get(),
728 None => timeslice.get(), // used whole timeslice
729 }
730 }
731 });
732
733 // Reset the scheduler timer in case it unconditionally triggers
734 // interrupts upon expiration. We do not want it to expire while the
735 // chip is sleeping, for example.
736 scheduler_timer.reset();
737
738 (return_reason, time_executed_us)
739 }
740
741 /// Method to invoke a system call on a particular process. Applies the
742 /// kernel system call filtering policy (if any). Handles `Yield` and
743 /// `Exit`, dispatches `Memop` to `memop::memop`, and dispatches peripheral
744 /// driver system calls to peripheral driver capsules through the platforms
745 /// `with_driver` method.
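    ///
    /// A hedged sketch of a platform-defined `SyscallFilter` that rejects all
    /// command calls to one driver (the driver number is hypothetical):
    ///
    /// ```rust,ignore
    /// struct DenyOneDriver;
    /// impl SyscallFilter for DenyOneDriver {
    ///     fn filter_syscall(
    ///         &self,
    ///         _process: &dyn process::Process,
    ///         syscall: &Syscall,
    ///     ) -> Result<(), ErrorCode> {
    ///         match *syscall {
    ///             // Reject commands to the (hypothetical) driver 0x90003.
    ///             Syscall::Command { driver_number: 0x90003, .. } => Err(ErrorCode::NODEVICE),
    ///             _ => Ok(()),
    ///         }
    ///     }
    /// }
    /// ```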
746 #[inline]
747 fn handle_syscall<KR: KernelResources<C>, C: Chip>(
748 &self,
749 resources: &KR,
750 process: &dyn process::Process,
751 syscall: Syscall,
752 ) {
753 // Hook for process debugging.
754 process.debug_syscall_called(syscall);
755
756 // Enforce platform-specific syscall filtering here.
757 //
758 // Before continuing to handle non-yield syscalls the kernel first
759 // checks if the platform wants to block that syscall for the process,
760 // and if it does, sets a return value which is returned to the calling
761 // process.
762 //
763 // Filtering a syscall (i.e. blocking the syscall from running) does not
764 // cause the process to lose its timeslice. The error will be returned
765 // immediately (assuming the process has not already exhausted its
766 // timeslice) allowing the process to decide how to handle the error.
767 match syscall {
768 Syscall::Yield {
769 which: _,
770 param_a: _,
771 param_b: _,
772 } => {} // Yield is not filterable.
773 Syscall::Exit {
774 which: _,
775 completion_code: _,
776 } => {} // Exit is not filterable.
777 Syscall::Memop {
778 operand: _,
779 arg0: _,
780 } => {} // Memop is not filterable.
781 _ => {
782 // Check all other syscalls for filtering.
783 if let Err(response) = resources.syscall_filter().filter_syscall(process, &syscall)
784 {
785 process.set_syscall_return_value(SyscallReturn::Failure(response));
786
787 if config::CONFIG.trace_syscalls {
788 debug!(
789 "[{:?}] Filtered: {:?} was rejected with {:?}",
790 process.processid(),
791 syscall,
792 response
793 );
794 }
795
796 return;
797 }
798 }
799 }
800
801 // Handle each of the syscalls.
802 match syscall {
803 Syscall::Memop { operand, arg0 } => {
804 let rval = memop::memop(process, operand, arg0);
805 if config::CONFIG.trace_syscalls {
806 debug!(
807 "[{:?}] memop({}, {:#x}) = {:?}",
808 process.processid(),
809 operand,
810 arg0,
811 rval
812 );
813 }
814 process.set_syscall_return_value(rval);
815 }
816 Syscall::Yield {
817 which,
818 param_a,
819 param_b,
820 } => {
821 if config::CONFIG.trace_syscalls {
822 debug!("[{:?}] yield. which: {}", process.processid(), which);
823 }
824 match which.try_into() {
825 Ok(YieldCall::NoWait) => {
826 // If this is a `Yield-NoWait` AND there are no pending
827 // tasks, then return to the process immediately.
828 // Otherwise, go into the yielded state so the pending
829 // task(s) are delivered now.
830 let has_tasks = process.has_tasks();
831
832 // Set the "did I trigger upcalls" flag.
833 // If the address is invalid, this does nothing.
834 //
835 // # Safety
836 //
837 // This is fine as long as no references to the
838 // process's memory exist. We do not have a reference,
839 // so we can safely call `set_byte()`.
840 unsafe {
841 let address = param_a as *mut u8;
842 process.set_byte(address, has_tasks as u8);
843 }
844
845 if has_tasks {
846 process.set_yielded_state();
847 }
848 }
849
850 Ok(YieldCall::Wait) => {
851 process.set_yielded_state();
852 }
853
854 Ok(YieldCall::WaitFor) => {
855 let upcall_id = UpcallId {
856 driver_num: param_a,
857 subscribe_num: param_b,
858 };
859 process.set_yielded_for_state(upcall_id);
860 }
861
862 _ => {
863 // Only 0, 1, and 2 are valid, so this is not a valid
864 // yield system call. Yield does not have a return value
865 // because it can push a function call onto the stack, so
866 // just return control to the process.
867 }
868 }
869 }
870 Syscall::Subscribe { driver_number, .. }
871 | Syscall::Command { driver_number, .. }
872 | Syscall::ReadWriteAllow { driver_number, .. }
873 | Syscall::UserspaceReadableAllow { driver_number, .. }
874 | Syscall::ReadOnlyAllow { driver_number, .. } => {
875 resources
876 .syscall_driver_lookup()
877 .with_driver(driver_number, |driver| match syscall {
878 Syscall::Subscribe {
879 driver_number,
880 subdriver_number,
881 upcall_ptr,
882 appdata,
883 } => {
884 // An upcall is identified as a tuple of the driver
885 // number and the subdriver number.
886 let upcall_id = UpcallId {
887 driver_num: driver_number,
888 subscribe_num: subdriver_number,
889 };
890
891 // TODO: when the compiler supports capability types
892 // bring this back as a NonNull
893 // type. https://github.com/tock/tock/issues/4134.
894 //
895 // Previously, we had a NonNull type (that had a niche)
896 // here, and could wrap that in Option to fill the niche
897 // and handle the Null case. CapabilityPtr is filling
898 // the gap left by * const(), which does not have the
899 // niche and allows NULL internally. Having a CHERI
900 // capability type with a niche is (maybe?) predicated
901 // on having better compiler support.
902 // Option<NonNull<()>> is preferable here, and it should
903 // go back to it just as soon as we can express "non
904 // null capability". For now, checking for the null case
905 // is handled internally in each `map_or` call.
906 //
907 // First check whether `upcall_ptr` is null: a null
908 // `upcall_ptr` represents the special "unsubscribe"
909 // operation.
910 // let ptr = NonNull::new(upcall_ptr);
911
912 // For convenience create an `Upcall` type now. This is
913 // just a data structure and doesn't do any checking or
914 // conversion.
915 let upcall = Upcall::new(process.processid(), upcall_id, appdata, upcall_ptr);
916
917 // If `upcall_ptr` is not null, we must first verify that the
918 // upcall function pointer is within process accessible
919 // memory. Per TRD104:
920 //
921 // > If the passed upcall is not valid (is outside
922 // > process executable memory...), the kernel...MUST
923 // > immediately return a failure with a error code of
924 // > `INVALID`.
925 let rval1 = upcall_ptr.map_or(None, |upcall_ptr_nonnull| {
926 if !process.is_valid_upcall_function_pointer(upcall_ptr_nonnull.as_ptr()) {
927 Some(ErrorCode::INVAL)
928 } else {
929 None
930 }
931 });
932
933 // If the upcall is either null or valid, then we
934 // continue handling the upcall.
935 let rval = match rval1 {
936 Some(err) => upcall.into_subscribe_failure(err),
937 None => {
938 match driver {
939 Some(driver) => {
940 // At this point we must save the new
941 // upcall and return the old. The
942 // upcalls are stored by the core kernel
943 // in the grant region so we can
944 // guarantee a correct upcall swap.
945 // However, we do need help with
946 // initially allocating the grant if
947 // this driver has never been used
948 // before.
949 //
950 // To avoid the overhead with checking
951 // for process liveness and grant
952 // allocation, we assume the grant is
953 // initially allocated. If it turns out
954 // it isn't we ask the capsule to
955 // allocate the grant.
956 match crate::grant::subscribe(process, upcall) {
957 Ok(upcall) => upcall.into_subscribe_success(),
958 Err((upcall, err @ ErrorCode::NOMEM)) => {
959 // If we get a memory error, we
960 // always try to allocate the
961 // grant since this could be the
962 // first time the grant is
963 // getting accessed.
964 match try_allocate_grant(driver, process) {
965 AllocResult::NewAllocation => {
966 // Now we try again. It
967 // is possible that the
968 // capsule did not
969 // actually allocate the
970 // grant, at which point
971 // this will fail again
972 // and we return an
973 // error to userspace.
974 match crate::grant::subscribe(
975 process, upcall,
976 ) {
977 // An Ok() returns
978 // the previous
979 // upcall, while
980 // Err() returns the
981 // one that was just
982 // passed.
983 Ok(upcall) => {
984 upcall.into_subscribe_success()
985 }
986 Err((upcall, err)) => {
987 upcall.into_subscribe_failure(err)
988 }
989 }
990 }
991 alloc_failure => {
992 // We didn't actually
993 // create a new alloc,
994 // so just error.
995 match (
996 config::CONFIG.trace_syscalls,
997 alloc_failure,
998 ) {
999 (true, AllocResult::NoAllocation) => {
1000 debug!("[{:?}] WARN driver #{:x} did not allocate grant",
1001 process.processid(), driver_number);
1002 }
1003 (true, AllocResult::SameAllocation) => {
1004 debug!("[{:?}] ERROR driver #{:x} allocated wrong grant counts",
1005 process.processid(), driver_number);
1006 }
1007 _ => {}
1008 }
1009 upcall.into_subscribe_failure(err)
1010 }
1011 }
1012 }
1013 Err((upcall, err)) => {
1014 upcall.into_subscribe_failure(err)
1015 }
1016 }
1017 }
1018 None => upcall.into_subscribe_failure(ErrorCode::NODEVICE),
1019 }
1020 }
1021 };
1022
1023 // Per TRD104, we only clear upcalls if the subscribe
1024 // will return success. At this point we know the result
1025 // and clear if necessary.
1026 if rval.is_success() {
1027 // Only one upcall should exist per tuple. To ensure
1028 // that there are no pending upcalls with the same
1029 // identifier but with the old function pointer, we
1030 // clear them now.
1031 let _ = process.remove_pending_upcalls(upcall_id);
1032 }
1033
1034 if config::CONFIG.trace_syscalls {
1035 debug!(
1036 "[{:?}] subscribe({:#x}, {}, @{:#x}, {:#x}) = {:?}",
1037 process.processid(),
1038 driver_number,
1039 subdriver_number,
1040 upcall_ptr,
1041 appdata,
1042 rval
1043 );
1044 }
1045
1046 process.set_syscall_return_value(rval);
1047 }
1048 Syscall::Command {
1049 driver_number,
1050 subdriver_number,
1051 arg0,
1052 arg1,
1053 } => {
1054 let cres = match driver {
1055 Some(d) => d.command(subdriver_number, arg0, arg1, process.processid()),
1056 None => CommandReturn::failure(ErrorCode::NODEVICE),
1057 };
1058
1059 let res = SyscallReturn::from_command_return(cres);
1060
1061 if config::CONFIG.trace_syscalls {
1062 debug!(
1063 "[{:?}] cmd({:#x}, {}, {:#x}, {:#x}) = {:?}",
1064 process.processid(),
1065 driver_number,
1066 subdriver_number,
1067 arg0,
1068 arg1,
1069 res,
1070 );
1071 }
1072 process.set_syscall_return_value(res);
1073 }
1074 Syscall::ReadWriteAllow {
1075 driver_number,
1076 subdriver_number,
1077 allow_address,
1078 allow_size,
1079 } => {
1080 let res = match driver {
1081 Some(driver) => {
1082 // Try to create an appropriate
1083 // [`ReadWriteProcessBuffer`]. This method will
1084 // ensure that the memory in question is located
1085 // in the process-accessible memory space.
1086 match process
1087 .build_readwrite_process_buffer(allow_address, allow_size)
1088 {
1089 Ok(rw_pbuf) => {
1090 // Creating the
1091 // [`ReadWriteProcessBuffer`] worked,
1092 // try to set in grant.
1093 match crate::grant::allow_rw(
1094 process,
1095 driver_number,
1096 subdriver_number,
1097 rw_pbuf,
1098 ) {
1099 Ok(rw_pbuf) => {
1100 let (ptr, len) = rw_pbuf.consume();
1101 SyscallReturn::AllowReadWriteSuccess(ptr, len)
1102 }
1103 Err((rw_pbuf, err @ ErrorCode::NOMEM)) => {
1104 // If we get a memory error, we
1105 // always try to allocate the
1106 // grant since this could be the
1107 // first time the grant is
1108 // getting accessed.
1109 match try_allocate_grant(driver, process) {
1110 AllocResult::NewAllocation => {
1111 // If we actually
1112 // allocated a new
1113 // grant, try again and
1114 // honor the result.
1115 match crate::grant::allow_rw(
1116 process,
1117 driver_number,
1118 subdriver_number,
1119 rw_pbuf,
1120 ) {
1121 Ok(rw_pbuf) => {
1122 let (ptr, len) = rw_pbuf.consume();
1123 SyscallReturn::AllowReadWriteSuccess(
1124 ptr, len,
1125 )
1126 }
1127 Err((rw_pbuf, err)) => {
1128 let (ptr, len) = rw_pbuf.consume();
1129 SyscallReturn::AllowReadWriteFailure(
1130 err, ptr, len,
1131 )
1132 }
1133 }
1134 }
1135 alloc_failure => {
1136 // We didn't actually
1137 // create a new alloc,
1138 // so just error.
1139 match (
1140 config::CONFIG.trace_syscalls,
1141 alloc_failure,
1142 ) {
1143 (true, AllocResult::NoAllocation) => {
1144 debug!("[{:?}] WARN driver #{:x} did not allocate grant",
1145 process.processid(), driver_number);
1146 }
1147 (true, AllocResult::SameAllocation) => {
1148 debug!("[{:?}] ERROR driver #{:x} allocated wrong grant counts",
1149 process.processid(), driver_number);
1150 }
1151 _ => {}
1152 }
1153 let (ptr, len) = rw_pbuf.consume();
1154 SyscallReturn::AllowReadWriteFailure(
1155 err, ptr, len,
1156 )
1157 }
1158 }
1159 }
1160 Err((rw_pbuf, err)) => {
1161 let (ptr, len) = rw_pbuf.consume();
1162 SyscallReturn::AllowReadWriteFailure(err, ptr, len)
1163 }
1164 }
1165 }
1166 Err(allow_error) => {
1167 // There was an error creating the
1168 // [`ReadWriteProcessBuffer`]. Report
1169 // back to the process with the original
1170 // parameters.
1171 SyscallReturn::AllowReadWriteFailure(
1172 allow_error,
1173 allow_address,
1174 allow_size,
1175 )
1176 }
1177 }
1178 }
1179 None => SyscallReturn::AllowReadWriteFailure(
1180 ErrorCode::NODEVICE,
1181 allow_address,
1182 allow_size,
1183 ),
1184 };
1185
1186 if config::CONFIG.trace_syscalls {
1187 debug!(
1188 "[{:?}] read-write allow({:#x}, {}, @{:#x}, {}) = {:?}",
1189 process.processid(),
1190 driver_number,
1191 subdriver_number,
1192 allow_address as usize,
1193 allow_size,
1194 res
1195 );
1196 }
1197 process.set_syscall_return_value(res);
1198 }
1199 Syscall::UserspaceReadableAllow {
1200 driver_number,
1201 subdriver_number,
1202 allow_address,
1203 allow_size,
1204 } => {
1205 let res = match driver {
1206 Some(d) => {
1207 // Try to create an appropriate
1208 // [`UserspaceReadableProcessBuffer`]. This
1209 // method will ensure that the memory in
1210 // question is located in the process-accessible
1211 // memory space.
1212 match process
1213 .build_readwrite_process_buffer(allow_address, allow_size)
1214 {
1215 Ok(rw_pbuf) => {
1216 // Creating the
1217 // [`UserspaceReadableProcessBuffer`]
1218 // worked, provide it to the capsule.
1219 match d.allow_userspace_readable(
1220 process.processid(),
1221 subdriver_number,
1222 rw_pbuf,
1223 ) {
1224 Ok(returned_pbuf) => {
1225 // The capsule has accepted the
1226 // allow operation. Pass the
1227 // previous buffer information
1228 // back to the process.
1229 let (ptr, len) = returned_pbuf.consume();
1230 SyscallReturn::UserspaceReadableAllowSuccess(
1231 ptr, len,
1232 )
1233 }
1234 Err((rejected_pbuf, err)) => {
1235 // The capsule has rejected the
1236 // allow operation. Pass the new
1237 // buffer information back to
1238 // the process.
1239 let (ptr, len) = rejected_pbuf.consume();
1240 SyscallReturn::UserspaceReadableAllowFailure(
1241 err, ptr, len,
1242 )
1243 }
1244 }
1245 }
1246 Err(allow_error) => {
1247 // There was an error creating the
1248 // [`UserspaceReadableProcessBuffer`].
1249 // Report back to the process.
1250 SyscallReturn::UserspaceReadableAllowFailure(
1251 allow_error,
1252 allow_address,
1253 allow_size,
1254 )
1255 }
1256 }
1257 }
1258
1259 None => SyscallReturn::UserspaceReadableAllowFailure(
1260 ErrorCode::NODEVICE,
1261 allow_address,
1262 allow_size,
1263 ),
1264 };
1265
1266 if config::CONFIG.trace_syscalls {
1267 debug!(
1268 "[{:?}] userspace readable allow({:#x}, {}, @{:#x}, {}) = {:?}",
1269 process.processid(),
1270 driver_number,
1271 subdriver_number,
1272 allow_address as usize,
1273 allow_size,
1274 res
1275 );
1276 }
1277 process.set_syscall_return_value(res);
1278 }
1279 Syscall::ReadOnlyAllow {
1280 driver_number,
1281 subdriver_number,
1282 allow_address,
1283 allow_size,
1284 } => {
1285 let res = match driver {
1286 Some(driver) => {
1287 // Try to create an appropriate
1288 // [`ReadOnlyProcessBuffer`]. This method will
1289 // ensure that the memory in question is located
1290 // in the process-accessible memory space.
1291 match process
1292 .build_readonly_process_buffer(allow_address, allow_size)
1293 {
1294 Ok(ro_pbuf) => {
1295 // Creating the
1296 // [`ReadOnlyProcessBuffer`] worked, try
1297 // to set in grant.
1298 match crate::grant::allow_ro(
1299 process,
1300 driver_number,
1301 subdriver_number,
1302 ro_pbuf,
1303 ) {
1304 Ok(ro_pbuf) => {
1305 let (ptr, len) = ro_pbuf.consume();
1306 SyscallReturn::AllowReadOnlySuccess(ptr, len)
1307 }
1308 Err((ro_pbuf, err @ ErrorCode::NOMEM)) => {
1309 // If we get a memory error, we
1310 // always try to allocate the
1311 // grant since this could be the
1312 // first time the grant is
1313 // getting accessed.
1314 match try_allocate_grant(driver, process) {
1315 AllocResult::NewAllocation => {
1316 // If we actually
1317 // allocated a new
1318 // grant, try again and
1319 // honor the result.
1320 match crate::grant::allow_ro(
1321 process,
1322 driver_number,
1323 subdriver_number,
1324 ro_pbuf,
1325 ) {
1326 Ok(ro_pbuf) => {
1327 let (ptr, len) = ro_pbuf.consume();
1328 SyscallReturn::AllowReadOnlySuccess(
1329 ptr, len,
1330 )
1331 }
1332 Err((ro_pbuf, err)) => {
1333 let (ptr, len) = ro_pbuf.consume();
1334 SyscallReturn::AllowReadOnlyFailure(
1335 err, ptr, len,
1336 )
1337 }
1338 }
1339 }
1340 alloc_failure => {
1341 // We didn't actually
1342 // create a new alloc,
1343 // so just error.
1344 match (
1345 config::CONFIG.trace_syscalls,
1346 alloc_failure,
1347 ) {
1348 (true, AllocResult::NoAllocation) => {
1349 debug!("[{:?}] WARN driver #{:x} did not allocate grant",
1350 process.processid(), driver_number);
1351 }
1352 (true, AllocResult::SameAllocation) => {
1353 debug!("[{:?}] ERROR driver #{:x} allocated wrong grant counts",
1354 process.processid(), driver_number);
1355 }
1356 _ => {}
1357 }
1358 let (ptr, len) = ro_pbuf.consume();
1359 SyscallReturn::AllowReadOnlyFailure(
1360 err, ptr, len,
1361 )
1362 }
1363 }
1364 }
1365 Err((ro_pbuf, err)) => {
1366 let (ptr, len) = ro_pbuf.consume();
1367 SyscallReturn::AllowReadOnlyFailure(err, ptr, len)
1368 }
1369 }
1370 }
1371 Err(allow_error) => {
1372 // There was an error creating the
1373 // [`ReadOnlyProcessBuffer`]. Report
1374 // back to the process with the original
1375 // parameters.
1376 SyscallReturn::AllowReadOnlyFailure(
1377 allow_error,
1378 allow_address,
1379 allow_size,
1380 )
1381 }
1382 }
1383 }
1384 None => SyscallReturn::AllowReadOnlyFailure(
1385 ErrorCode::NODEVICE,
1386 allow_address,
1387 allow_size,
1388 ),
1389 };
1390
1391 if config::CONFIG.trace_syscalls {
1392 debug!(
1393 "[{:?}] read-only allow({:#x}, {}, @{:#x}, {}) = {:?}",
1394 process.processid(),
1395 driver_number,
1396 subdriver_number,
1397 allow_address as usize,
1398 allow_size,
1399 res
1400 );
1401 }
1402
1403 process.set_syscall_return_value(res);
1404 }
1405 Syscall::Yield { .. }
1406 | Syscall::Exit { .. }
1407 | Syscall::Memop { .. } => {
1408 // These variants must not be reachable due to the outer
1409 // match statement:
1410 debug_assert!(false, "Kernel system call handling invariant violated!");
1411 },
1412 })
1413 }
1414 Syscall::Exit {
1415 which,
1416 completion_code,
1417 } => {
1418 // Capture the process ID first: exit-restart (`try_restart`) modifies the ID of the process.
1419 let old_process_id = process.processid();
1420 let optional_return_value = match which {
1421 // The process called the `exit-terminate` system call.
1422 0 => {
1423 process.terminate(Some(completion_code as u32));
1424 None
1425 }
1426 // The process called the `exit-restart` system call.
1427 1 => {
1428 process.try_restart(Some(completion_code as u32));
1429 None
1430 }
1431 // The process called an invalid variant of the Exit
1432 // system call class.
1433 _ => {
1434 let return_value = SyscallReturn::Failure(ErrorCode::NOSUPPORT);
1435 process.set_syscall_return_value(return_value);
1436 Some(return_value)
1437 }
1438 };
1439 if config::CONFIG.trace_syscalls {
1440 debug!(
1441 "[{:?}] exit(which: {}, completion_code: {}) = {:?}",
1442 old_process_id, which, completion_code, optional_return_value,
1443 );
1444 }
1445 }
1446 }
1447 }
1448}