// cortexm/syscall.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Implementation of the architecture-specific portions of the kernel-userland
//! system call interface.

8use core::fmt::Write;
9use core::marker::PhantomData;
10use core::mem::{self, size_of};
11use core::ops::Range;
12use core::ptr::{self, addr_of, addr_of_mut, read_volatile, write_volatile};
13use kernel::errorcode::ErrorCode;
14
15use crate::CortexMVariant;
16
/// This is used in the syscall handler. When set to 1 this means the
/// svc_handler was called. Marked `pub` because it is used in the cortex-m*
/// specific handler.
#[no_mangle]
#[used]
pub static mut SYSCALL_FIRED: usize = 0;

/// This is called in the hard fault handler. When set to 1 this means the hard
/// fault handler was called. Marked `pub` because it is used in the cortex-m*
/// specific handler.
///
/// n.b. If the kernel hard faults, it immediately panic's. This flag is only
/// for handling application hard faults.
#[no_mangle]
#[used]
pub static mut APP_HARD_FAULT: usize = 0;

/// This is used in the hardfault handler.
///
/// When an app faults, the hardfault handler stores the value of the
/// SCB registers in this static array. This makes them available to
/// be displayed in a diagnostic fault message.
#[no_mangle]
#[used]
pub static mut SCB_REGISTERS: [u32; 5] = [0; 5];

// Space for 8 u32s: r0-r3, r12, lr, pc, and xPSR
const SVC_FRAME_SIZE: usize = 32;

/// This holds all of the state that the kernel must keep for the process when
/// the process is not executing.
#[derive(Default)]
pub struct CortexMStoredState {
    // Callee-saved registers r4-r11 (see the register mapping in
    // `print_context`), preserved across context switches.
    regs: [usize; 8],
    // PC to resume at when the kernel returns to a yielded process.
    yield_pc: usize,
    // Saved program status register (xPSR) for the process.
    psr: usize,
    // Process stack pointer.
    psp: usize,
}

56/// Values for encoding the stored state buffer in a binary slice.
57const VERSION: usize = 1;
58const STORED_STATE_SIZE: usize = size_of::<CortexMStoredState>();
59const TAG: [u8; 4] = [b'c', b't', b'x', b'm'];
60const METADATA_LEN: usize = 3;
61
62const VERSION_IDX: usize = 0;
63const SIZE_IDX: usize = 1;
64const TAG_IDX: usize = 2;
65const YIELDPC_IDX: usize = 3;
66const PSR_IDX: usize = 4;
67const PSP_IDX: usize = 5;
68const REGS_IDX: usize = 6;
69const REGS_RANGE: Range<usize> = REGS_IDX..REGS_IDX + 8;
70
71const USIZE_SZ: usize = size_of::<usize>();
72fn usize_byte_range(index: usize) -> Range<usize> {
73    index * USIZE_SZ..(index + 1) * USIZE_SZ
74}
75
76fn usize_from_u8_slice(slice: &[u8], index: usize) -> Result<usize, ErrorCode> {
77    let range = usize_byte_range(index);
78    Ok(usize::from_le_bytes(
79        slice
80            .get(range)
81            .ok_or(ErrorCode::SIZE)?
82            .try_into()
83            .or(Err(ErrorCode::FAIL))?,
84    ))
85}
86
87fn write_usize_to_u8_slice(val: usize, slice: &mut [u8], index: usize) {
88    let range = usize_byte_range(index);
89    slice[range].copy_from_slice(&val.to_le_bytes());
90}
91
92impl core::convert::TryFrom<&[u8]> for CortexMStoredState {
93    type Error = ErrorCode;
94    fn try_from(ss: &[u8]) -> Result<CortexMStoredState, Self::Error> {
95        if ss.len() == size_of::<CortexMStoredState>() + METADATA_LEN * USIZE_SZ
96            && usize_from_u8_slice(ss, VERSION_IDX)? == VERSION
97            && usize_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE
98            && usize_from_u8_slice(ss, TAG_IDX)? == u32::from_le_bytes(TAG) as usize
99        {
100            let mut res = CortexMStoredState {
101                regs: [0; 8],
102                yield_pc: usize_from_u8_slice(ss, YIELDPC_IDX)?,
103                psr: usize_from_u8_slice(ss, PSR_IDX)?,
104                psp: usize_from_u8_slice(ss, PSP_IDX)?,
105            };
106            for (i, v) in (REGS_RANGE).enumerate() {
107                res.regs[i] = usize_from_u8_slice(ss, v)?;
108            }
109            Ok(res)
110        } else {
111            Err(ErrorCode::FAIL)
112        }
113    }
114}
115
116/// Implementation of the `UserspaceKernelBoundary` for the Cortex-M non-floating point
117/// architecture.
118pub struct SysCall<A: CortexMVariant>(PhantomData<A>);
119
120impl<A: CortexMVariant> SysCall<A> {
121    pub const unsafe fn new() -> SysCall<A> {
122        SysCall(PhantomData)
123    }
124}
125
126impl<A: CortexMVariant> kernel::syscall::UserspaceKernelBoundary for SysCall<A> {
127    type StoredState = CortexMStoredState;
128
129    fn initial_process_app_brk_size(&self) -> usize {
130        // Cortex-M hardware uses 8 words on the stack to implement context
131        // switches. So we need at least 32 bytes.
132        SVC_FRAME_SIZE
133    }
134
135    unsafe fn initialize_process(
136        &self,
137        accessible_memory_start: *const u8,
138        app_brk: *const u8,
139        state: &mut Self::StoredState,
140    ) -> Result<(), ()> {
141        // We need to initialize the stored state for the process here. This
142        // initialization can be called multiple times for a process, for
143        // example if the process is restarted.
144        state.regs.iter_mut().for_each(|x| *x = 0);
145        state.yield_pc = 0;
146        state.psr = 0x01000000; // Set the Thumb bit and clear everything else.
147        state.psp = app_brk as usize; // Set to top of process-accessible memory.
148
149        // Make sure there's enough room on the stack for the initial SVC frame.
150        if (app_brk as usize - accessible_memory_start as usize) < SVC_FRAME_SIZE {
151            // Not enough room on the stack to add a frame.
152            return Err(());
153        }
154
155        // Allocate the kernel frame
156        state.psp -= SVC_FRAME_SIZE;
157        Ok(())
158    }
159
160    unsafe fn set_syscall_return_value(
161        &self,
162        accessible_memory_start: *const u8,
163        app_brk: *const u8,
164        state: &mut Self::StoredState,
165        return_value: kernel::syscall::SyscallReturn,
166    ) -> Result<(), ()> {
167        // For the Cortex-M arch, write the return values in the same
168        // place that they were originally passed in (i.e. at the
169        // bottom the SVC structure on the stack)
170
171        // First, we need to validate that this location is inside of the
172        // process's accessible memory. Alignment is guaranteed by hardware.
173        if state.psp < accessible_memory_start as usize
174            || state.psp.saturating_add(mem::size_of::<u32>() * 4) > app_brk as usize
175        {
176            return Err(());
177        }
178
179        let sp = state.psp as *mut u32;
180        let (r0, r1, r2, r3) = (sp.offset(0), sp.offset(1), sp.offset(2), sp.offset(3));
181
182        // These operations are only safe so long as
183        // - the pointers are properly aligned. This is guaranteed because the
184        //   pointers are all offset multiples of 4 bytes from the stack
185        //   pointer, which is guaranteed to be properly aligned after
186        //   exception entry on Cortex-M. See
187        //   https://github.com/tock/tock/pull/2478#issuecomment-796389747
188        //   for more details.
189        // - the pointer is dereferencable, i.e. the memory range of
190        //   the given size starting at the pointer must all be within
191        //   the bounds of a single allocated object
192        // - the pointer must point to an initialized instance of its
193        //   type
194        // - during the lifetime of the returned reference (of the
195        //   cast, essentially an arbitrary 'a), the memory must not
196        //   get accessed (read or written) through any other pointer.
197        //
198        // Refer to
199        // https://doc.rust-lang.org/std/primitive.pointer.html#safety-13
200        kernel::utilities::arch_helpers::encode_syscall_return_trd104(
201            &kernel::utilities::arch_helpers::TRD104SyscallReturn::from_syscall_return(
202                return_value,
203            ),
204            &mut *r0,
205            &mut *r1,
206            &mut *r2,
207            &mut *r3,
208        );
209
210        Ok(())
211    }
212
213    /// When the process calls `svc` to enter the kernel, the hardware
214    /// automatically pushes an SVC frame that will be unstacked when the kernel
215    /// returns to the process. In the special case of process startup,
216    /// `initialize_new_process` sets up an empty SVC frame as if an `svc` had
217    /// been called.
218    ///
219    /// Here, we modify this stack frame such that the process resumes at the
220    /// beginning of the callback function that we want the process to run. We
221    /// place the originally intended return address in the link register so
222    /// that when the function completes execution continues.
223    ///
224    /// In effect, this converts `svc` into `bl callback`.
225    unsafe fn set_process_function(
226        &self,
227        accessible_memory_start: *const u8,
228        app_brk: *const u8,
229        state: &mut CortexMStoredState,
230        callback: kernel::process::FunctionCall,
231    ) -> Result<(), ()> {
232        // Ensure that [`state.psp`, `state.psp + SVC_FRAME_SIZE`] is within
233        // process-accessible memory. Alignment is guaranteed by hardware.
234        if state.psp < accessible_memory_start as usize
235            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize
236        {
237            return Err(());
238        }
239
240        // Notes:
241        //  - Instruction addresses require `|1` to indicate thumb code
242        //  - Stack offset 4 is R12, which the syscall interface ignores
243        let stack_bottom = state.psp as *mut usize;
244        ptr::write(stack_bottom.offset(7), state.psr); //......... -> APSR
245        ptr::write(stack_bottom.offset(6), callback.pc.addr() | 1); //... -> PC
246        ptr::write(stack_bottom.offset(5), state.yield_pc | 1); // -> LR
247        ptr::write(stack_bottom.offset(3), callback.argument3.as_usize()); // -> R3
248        ptr::write(stack_bottom.offset(2), callback.argument2); // -> R2
249        ptr::write(stack_bottom.offset(1), callback.argument1); // -> R1
250        ptr::write(stack_bottom.offset(0), callback.argument0); // -> R0
251
252        Ok(())
253    }
254
255    unsafe fn switch_to_process(
256        &self,
257        accessible_memory_start: *const u8,
258        app_brk: *const u8,
259        state: &mut CortexMStoredState,
260    ) -> (kernel::syscall::ContextSwitchReason, Option<*const u8>) {
261        let new_stack_pointer = A::switch_to_user(state.psp as *const usize, &mut state.regs);
262
263        // We need to keep track of the current stack pointer.
264        state.psp = new_stack_pointer as usize;
265
266        // We need to validate that the stack pointer and the SVC frame are
267        // within process accessible memory. Alignment is guaranteed by
268        // hardware.
269        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
270            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;
271
272        // Determine why this returned and the process switched back to the
273        // kernel.
274
275        // Check to see if the fault handler was called while the process was
276        // running.
277        let app_fault = read_volatile(&*addr_of!(APP_HARD_FAULT));
278        write_volatile(&mut *addr_of_mut!(APP_HARD_FAULT), 0);
279
280        // Check to see if the svc_handler was called and the process called a
281        // syscall.
282        let syscall_fired = read_volatile(&*addr_of!(SYSCALL_FIRED));
283        write_volatile(&mut *addr_of_mut!(SYSCALL_FIRED), 0);
284
285        // Now decide the reason based on which flags were set.
286        let switch_reason = if app_fault == 1 || invalid_stack_pointer {
287            // APP_HARD_FAULT takes priority. This means we hit the hardfault
288            // handler and this process faulted.
289            kernel::syscall::ContextSwitchReason::Fault
290        } else if syscall_fired == 1 {
291            // Save these fields after a syscall. If this is a synchronous
292            // syscall (i.e. we return a value to the app immediately) then this
293            // will have no effect. If we are doing something like `yield()`,
294            // however, then we need to have this state.
295            state.yield_pc = ptr::read(new_stack_pointer.offset(6));
296            state.psr = ptr::read(new_stack_pointer.offset(7));
297
298            // Get the syscall arguments and return them along with the syscall.
299            // It's possible the app did something invalid, in which case we put
300            // the app in the fault state.
301            let r0 = ptr::read(new_stack_pointer.offset(0));
302            let r1 = ptr::read(new_stack_pointer.offset(1));
303            let r2 = ptr::read(new_stack_pointer.offset(2));
304            let r3 = ptr::read(new_stack_pointer.offset(3));
305
306            // Get the actual SVC number.
307            let pcptr = ptr::read((new_stack_pointer as *const *const u16).offset(6));
308            let svc_instr = ptr::read(pcptr.offset(-1));
309            let svc_num = (svc_instr & 0xff) as u8;
310
311            // Use the helper function to convert these raw values into a Tock
312            // `Syscall` type.
313            let syscall = kernel::syscall::Syscall::from_register_arguments(
314                svc_num,
315                r0,
316                r1.into(),
317                r2.into(),
318                r3.into(),
319            );
320
321            match syscall {
322                Some(s) => kernel::syscall::ContextSwitchReason::SyscallFired { syscall: s },
323                None => kernel::syscall::ContextSwitchReason::Fault,
324            }
325        } else {
326            // If none of the above cases are true its because the process was interrupted by an
327            // ISR for a hardware event
328            kernel::syscall::ContextSwitchReason::Interrupted
329        };
330
331        (switch_reason, Some(new_stack_pointer as *const u8))
332    }
333
334    unsafe fn print_context(
335        &self,
336        accessible_memory_start: *const u8,
337        app_brk: *const u8,
338        state: &CortexMStoredState,
339        writer: &mut dyn Write,
340    ) {
341        // Check if the stored stack pointer is valid. Alignment is guaranteed
342        // by hardware.
343        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
344            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;
345
346        let stack_pointer = state.psp as *const usize;
347
348        // If we cannot use the stack pointer, generate default bad looking
349        // values we can use for the printout. Otherwise, read the correct
350        // values.
351        let (r0, r1, r2, r3, r12, lr, pc, xpsr) = if invalid_stack_pointer {
352            (
353                0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD,
354                0xBAD00BAD,
355            )
356        } else {
357            let r0 = ptr::read(stack_pointer.offset(0));
358            let r1 = ptr::read(stack_pointer.offset(1));
359            let r2 = ptr::read(stack_pointer.offset(2));
360            let r3 = ptr::read(stack_pointer.offset(3));
361            let r12 = ptr::read(stack_pointer.offset(4));
362            let lr = ptr::read(stack_pointer.offset(5));
363            let pc = ptr::read(stack_pointer.offset(6));
364            let xpsr = ptr::read(stack_pointer.offset(7));
365            (r0, r1, r2, r3, r12, lr, pc, xpsr)
366        };
367
368        let _ = writer.write_fmt(format_args!(
369            "\
370             \r\n  R0 : {:#010X}    R6 : {:#010X}\
371             \r\n  R1 : {:#010X}    R7 : {:#010X}\
372             \r\n  R2 : {:#010X}    R8 : {:#010X}\
373             \r\n  R3 : {:#010X}    R10: {:#010X}\
374             \r\n  R4 : {:#010X}    R11: {:#010X}\
375             \r\n  R5 : {:#010X}    R12: {:#010X}\
376             \r\n  R9 : {:#010X} (Static Base Register)\
377             \r\n  SP : {:#010X} (Process Stack Pointer)\
378             \r\n  LR : {:#010X}\
379             \r\n  PC : {:#010X}\
380             \r\n YPC : {:#010X}\
381             \r\n",
382            r0,
383            state.regs[2],
384            r1,
385            state.regs[3],
386            r2,
387            state.regs[4],
388            r3,
389            state.regs[6],
390            state.regs[0],
391            state.regs[7],
392            state.regs[1],
393            r12,
394            state.regs[5],
395            stack_pointer as usize,
396            lr,
397            pc,
398            state.yield_pc,
399        ));
400        let _ = writer.write_fmt(format_args!(
401            "\
402             \r\n APSR: N {} Z {} C {} V {} Q {}\
403             \r\n       GE {} {} {} {}",
404            (xpsr >> 31) & 0x1,
405            (xpsr >> 30) & 0x1,
406            (xpsr >> 29) & 0x1,
407            (xpsr >> 28) & 0x1,
408            (xpsr >> 27) & 0x1,
409            (xpsr >> 19) & 0x1,
410            (xpsr >> 18) & 0x1,
411            (xpsr >> 17) & 0x1,
412            (xpsr >> 16) & 0x1,
413        ));
414        let ici_it = (((xpsr >> 25) & 0x3) << 6) | ((xpsr >> 10) & 0x3f);
415        let thumb_bit = ((xpsr >> 24) & 0x1) == 1;
416        let _ = writer.write_fmt(format_args!(
417            "\
418             \r\n EPSR: ICI.IT {:#04x}\
419             \r\n       ThumbBit {} {}\r\n",
420            ici_it,
421            thumb_bit,
422            if thumb_bit {
423                ""
424            } else {
425                "!!ERROR - Cortex M Thumb only!"
426            },
427        ));
428    }
429
430    fn store_context(
431        &self,
432        state: &CortexMStoredState,
433        out: &mut [u8],
434    ) -> Result<usize, ErrorCode> {
435        if out.len() >= size_of::<CortexMStoredState>() + 3 * USIZE_SZ {
436            write_usize_to_u8_slice(VERSION, out, VERSION_IDX);
437            write_usize_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX);
438            write_usize_to_u8_slice(u32::from_le_bytes(TAG) as usize, out, TAG_IDX);
439            write_usize_to_u8_slice(state.yield_pc, out, YIELDPC_IDX);
440            write_usize_to_u8_slice(state.psr, out, PSR_IDX);
441            write_usize_to_u8_slice(state.psp, out, PSP_IDX);
442            for (i, v) in state.regs.iter().enumerate() {
443                write_usize_to_u8_slice(*v, out, REGS_IDX + i);
444            }
445            // + 3 for yield_pc, psr, psp
446            Ok((state.regs.len() + 3 + METADATA_LEN) * USIZE_SZ)
447        } else {
448            Err(ErrorCode::SIZE)
449        }
450    }
451}