cortexv7m/lib.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2024.

//! Generic support for all Cortex-M platforms.

#![no_std]

// These constants are defined in the linker script.
extern "C" {
    static _estack: u8;
    static _sstack: u8;
}

/// ARMv7-M systick handler function.
///
/// For documentation of this function, please see
/// [`CortexMVariant::SYSTICK_HANDLER`].
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb                               // synchronization barrier

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // This will resume in the switch_to_user function where application state
    // is saved and the scheduler can choose what to do next.
    bx lr
        "
    );
}
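
// Editorial sketch (not part of the original file, not used by the handlers):
// the `bfc lr, #2, #1` above and the `orr lr, lr, #4` in the SVC handler below
// both manipulate the SPSEL bit (bit 2) of the EXC_RETURN value. In Rust terms:
#[allow(dead_code)]
fn exc_return_spsel_sketch(exc_return: u32) -> (u32, u32) {
    let return_on_msp = exc_return & !(1 << 2); // bfc lr, #2, #1: kernel stack
    let return_on_psp = exc_return | (1 << 2); // orr lr, lr, #4: process stack
    (return_on_msp, return_on_psp)
}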

/// Handler of `svc` instructions on ARMv7-M.
///
/// For documentation of this function, please see
/// [`CortexMVariant::SVC_HANDLER`].
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // First check to see which direction we are going in. If the link register
    // (containing EXC_RETURN) has a 1 in the SPSEL bit (meaning the
    // alternative/process stack was in use) then we are coming from a process
    // which has called a syscall.
    ubfx r0, lr, #2, #1               // r0 = (LR & (0x1<<2)) >> 2
    cmp r0, #0                        // is r0 (SPSEL bit) == 0?
    bne 100f // to_kernel             // if SPSEL == 1, jump to to_kernel

    // If we get here, then this is a context switch from the kernel to the
    // application. Use the CONTROL register to set the thread mode to
    // unprivileged to run the application.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #1                        // r0 = 1
    msr CONTROL, r0                   // CONTROL = 1
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we execute using the process stack we set the SPSEL bit to 1
    // to use the alternate (process) stack.
    orr lr, lr, #4                    // LR = LR | 0b100

    // Switch to the app.
    bx lr

100: // to_kernel
    // An application called a syscall. We mark this in the global variable
    // `SYSCALL_FIRED` which is stored in the syscall file.
    // `UserspaceKernelBoundary` will use this variable to decide why the app
    // stopped executing.
    ldr r0, =SYSCALL_FIRED            // r0 = &SYSCALL_FIRED
    mov r1, #1                        // r1 = 1
    str r1, [r0]                      // *SYSCALL_FIRED = 1

    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Return to the kernel.
    bx lr
        "
    );
}
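
// Editorial sketch (illustration only; the helper name is invented): the
// `ubfx r0, lr, #2, #1` at the top of the SVC handler extracts the SPSEL bit
// from EXC_RETURN, i.e. it computes:
#[allow(dead_code)]
fn came_from_process_sketch(exc_return: u32) -> bool {
    ((exc_return >> 2) & 1) == 1 // SPSEL set: the process (PSP) stack was in use
}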

/// Generic interrupt handler for ARMv7-M instruction sets.
///
/// For documentation of this function, see [`CortexMVariant::GENERIC_ISR`].
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Use the CONTROL register to set the thread mode to privileged to ensure
    // we are executing as the kernel. This may be redundant if the interrupt
    // happened while the kernel code was executing.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // We now need to disable the interrupt that fired in the NVIC to ensure it
    // does not trigger again before the scheduler has a chance to handle it. We
    // do this here in assembly for performance.
    //
    // The general idea is:
    // 1. Get the index of the interrupt that occurred.
    // 2. Set the disable bit for that interrupt in the NVIC.

    // Find the ISR number (`index`) by looking at the low byte of the IPSR
    // register.
    mrs r0, IPSR                      // r0 = Interrupt Program Status Register (IPSR)
    and r0, #0xff                     // r0 = r0 & 0xFF; get lowest 8 bits
    sub r0, #16                       // r0 = r0 - 16;   ISRs start at 16, so subtract 16 to get the zero-based index.

    // Now disable that interrupt in the NVIC.
    // High level:
    //    r0 = index
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    lsrs r2, r0, #5                   // r2 = r0 / 32
    // r0 = 1 << (r0 & 31)
    movs r3, #1                       // r3 = 1
    and r0, r0, #31                   // r0 = r0 & 31
    lsl r0, r3, r0                    // r0 = r3 << r0

    // Load the ICER register address.
    ldr r3, =0xe000e180               // r3 = &NVIC.ICER

    // Here:
    // - `r2` is index / 32
    // - `r3` is &NVIC.ICER
    // - `r0` is 1 << (index & 31)
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost in
    // `service_pending_interrupts()`.
    ldr r3, =0xe000e200               // r3 = &NVIC.ISPR
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Now we can return from the interrupt context and resume what we were
    // doing. If an app was executing we will switch to the kernel so it can
    // choose whether to service the interrupt.
    bx lr
        "
    );
}
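
// Editorial sketch (illustration only; not used by the handler above): the
// NVIC disable/re-pend sequence computes a register index and bit mask from
// the zero-based interrupt number, i.e. ICER[index / 32] = 1 << (index % 32).
#[allow(dead_code)]
fn nvic_bit_position_sketch(index: u32) -> (usize, u32) {
    let word = (index / 32) as usize; // which 32-bit ICER/ISPR register
    let mask = 1u32 << (index % 32); // which bit within that register
    (word, mask)
}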

/// Assembly function to switch into userspace and store/restore application
/// state.
///
/// For documentation of this function, please see
/// `CortexMVariant::switch_to_user`.
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
pub unsafe fn switch_to_user_arm_v7m(
    mut user_stack: *const usize,
    process_regs: &mut [usize; 8],
) -> *const usize {
    use core::arch::asm;
    asm!(
        "
    // The Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and
    // r9 as clobbers. r6 and r9 are used internally by LLVM, and r7 is used for
    // the frame pointer. However, in the process of restoring and saving the
    // process's registers, we do in fact clobber r6, r7 and r9. So, we work
    // around this by manually saving r6 into r2, r7 into r3, and r9 into r12,
    // and then mark those registers as clobbered.
    mov r2, r6                        // r2 = r6
    mov r3, r7                        // r3 = r7
    mov r12, r9                       // r12 = r9

    // The arguments passed in are:
    // - `r0` is the bottom of the user stack
    // - `r1` is a reference to `CortexMStoredState.regs`

    // Load bottom of stack into the Process Stack Pointer.
    msr psp, r0                       // PSP = r0

    // Load non-hardware-stacked registers from the process stored state. Ensure
    // that the address register (right now r1) is stored in a callee-saved
    // register.
    ldmia r1, {{r4-r11}}              // r4 = r1[0], r5 = r1[1], ...

    // Generate a SVC exception to handle the context switch from kernel to
    // userspace. It doesn't matter which SVC number we use here as it is not
    // used in the exception handler. Data being returned from a syscall is
    // transferred on the app's stack.
    svc 0xff

    // When execution returns here we have switched back to the kernel from the
    // application.

    // Push non-hardware-stacked registers into the saved state for the
    // application.
    stmia r1, {{r4-r11}}              // r1[0] = r4, r1[1] = r5, ...

    // Update the user stack pointer with the current value after the
    // application has executed.
    mrs r0, PSP                       // r0 = PSP

    // Need to restore r6, r7 and r9 since we clobbered them when switching to
    // and from the app.
    mov r6, r2                        // r6 = r2
    mov r7, r3                        // r7 = r3
    mov r9, r12                       // r9 = r12
        ",
        inout("r0") user_stack,
        in("r1") process_regs,
        out("r2") _,
        out("r3") _,
        out("r4") _,
        out("r5") _,
        out("r8") _,
        out("r10") _,
        out("r11") _,
        out("r12") _,
    );

    user_stack
}
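
// Editorial sketch of a hypothetical call site (the real caller lives in the
// kernel's syscall code; `app_stack_ptr` and `stored_regs` are invented names
// for this illustration):
//
//     let mut stored_regs: [usize; 8] = [0; 8];
//     let new_sp = unsafe { switch_to_user_arm_v7m(app_stack_ptr, &mut stored_regs) };
//     // `new_sp` is the app's stack pointer after it yields back to the kernel.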

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Continue the hardfault handler for all hard faults that occurred
/// during kernel execution. This function must never return.
unsafe extern "C" fn hard_fault_handler_arm_v7m_kernel(
    faulting_stack: *mut u32,
    stack_overflow: u32,
) -> ! {
    if stack_overflow != 0 {
        // Panic to show the correct error.
        panic!("kernel stack overflow");
    } else {
        // Show the normal kernel hardfault message.
        let stacked_r0: u32 = *faulting_stack.offset(0);
        let stacked_r1: u32 = *faulting_stack.offset(1);
        let stacked_r2: u32 = *faulting_stack.offset(2);
        let stacked_r3: u32 = *faulting_stack.offset(3);
        let stacked_r12: u32 = *faulting_stack.offset(4);
        let stacked_lr: u32 = *faulting_stack.offset(5);
        let stacked_pc: u32 = *faulting_stack.offset(6);
        let stacked_xpsr: u32 = *faulting_stack.offset(7);

        let mode_str = "Kernel";

        let shcsr: u32 = core::ptr::read_volatile(0xE000ED24 as *const u32);
        let cfsr: u32 = core::ptr::read_volatile(0xE000ED28 as *const u32);
        let hfsr: u32 = core::ptr::read_volatile(0xE000ED2C as *const u32);
        let mmfar: u32 = core::ptr::read_volatile(0xE000ED34 as *const u32);
        let bfar: u32 = core::ptr::read_volatile(0xE000ED38 as *const u32);

        let iaccviol = (cfsr & 0x01) == 0x01;
        let daccviol = (cfsr & 0x02) == 0x02;
        let munstkerr = (cfsr & 0x08) == 0x08;
        let mstkerr = (cfsr & 0x10) == 0x10;
        let mlsperr = (cfsr & 0x20) == 0x20;
        let mmfarvalid = (cfsr & 0x80) == 0x80;

        let ibuserr = ((cfsr >> 8) & 0x01) == 0x01;
        let preciserr = ((cfsr >> 8) & 0x02) == 0x02;
        let impreciserr = ((cfsr >> 8) & 0x04) == 0x04;
        let unstkerr = ((cfsr >> 8) & 0x08) == 0x08;
        let stkerr = ((cfsr >> 8) & 0x10) == 0x10;
        let lsperr = ((cfsr >> 8) & 0x20) == 0x20;
        let bfarvalid = ((cfsr >> 8) & 0x80) == 0x80;

        let undefinstr = ((cfsr >> 16) & 0x01) == 0x01;
        let invstate = ((cfsr >> 16) & 0x02) == 0x02;
        let invpc = ((cfsr >> 16) & 0x04) == 0x04;
        let nocp = ((cfsr >> 16) & 0x08) == 0x08;
        let unaligned = ((cfsr >> 16) & 0x100) == 0x100;
        let divbyzero = ((cfsr >> 16) & 0x200) == 0x200;

        let vecttbl = (hfsr & 0x02) == 0x02;
        let forced = (hfsr & 0x40000000) == 0x40000000;

        let ici_it = (((stacked_xpsr >> 25) & 0x3) << 6) | ((stacked_xpsr >> 10) & 0x3f);
        let thumb_bit = ((stacked_xpsr >> 24) & 0x1) == 1;
        let exception_number = (stacked_xpsr & 0x1ff) as usize;

        panic!(
            "{} HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \tpsr 0x{:x} [ N {} Z {} C {} V {} Q {} GE {}{}{}{} ; ICI.IT {} T {} ; Exc {}-{} ]\r\n\
         \tsp  0x{:x}\r\n\
         \ttop of stack     0x{:x}\r\n\
         \tbottom of stack  0x{:x}\r\n\
         \tSHCSR 0x{:x}\r\n\
         \tCFSR  0x{:x}\r\n\
         \tHFSR  0x{:x}\r\n\
         \tInstruction Access Violation:       {}\r\n\
         \tData Access Violation:              {}\r\n\
         \tMemory Management Unstacking Fault: {}\r\n\
         \tMemory Management Stacking Fault:   {}\r\n\
         \tMemory Management Lazy FP Fault:    {}\r\n\
         \tInstruction Bus Error:              {}\r\n\
         \tPrecise Data Bus Error:             {}\r\n\
         \tImprecise Data Bus Error:           {}\r\n\
         \tBus Unstacking Fault:               {}\r\n\
         \tBus Stacking Fault:                 {}\r\n\
         \tBus Lazy FP Fault:                  {}\r\n\
         \tUndefined Instruction Usage Fault:  {}\r\n\
         \tInvalid State Usage Fault:          {}\r\n\
         \tInvalid PC Load Usage Fault:        {}\r\n\
         \tNo Coprocessor Usage Fault:         {}\r\n\
         \tUnaligned Access Usage Fault:       {}\r\n\
         \tDivide By Zero:                     {}\r\n\
         \tBus Fault on Vector Table Read:     {}\r\n\
         \tForced Hard Fault:                  {}\r\n\
         \tFaulting Memory Address: (valid: {}) {:#010X}\r\n\
         \tBus Fault Address:       (valid: {}) {:#010X}\r\n\
         ",
            mode_str,
            option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
            stacked_r0,
            stacked_r1,
            stacked_r2,
            stacked_r3,
            stacked_r12,
            stacked_lr,
            stacked_pc,
            stacked_xpsr,
            (stacked_xpsr >> 31) & 0x1,
            (stacked_xpsr >> 30) & 0x1,
            (stacked_xpsr >> 29) & 0x1,
            (stacked_xpsr >> 28) & 0x1,
            (stacked_xpsr >> 27) & 0x1,
            (stacked_xpsr >> 19) & 0x1,
            (stacked_xpsr >> 18) & 0x1,
            (stacked_xpsr >> 17) & 0x1,
            (stacked_xpsr >> 16) & 0x1,
            ici_it,
            thumb_bit,
            exception_number,
            ipsr_isr_number_to_str(exception_number),
            faulting_stack as u32,
            core::ptr::addr_of!(_estack) as u32,
            core::ptr::addr_of!(_sstack) as u32,
            shcsr,
            cfsr,
            hfsr,
            iaccviol,
            daccviol,
            munstkerr,
            mstkerr,
            mlsperr,
            ibuserr,
            preciserr,
            impreciserr,
            unstkerr,
            stkerr,
            lsperr,
            undefinstr,
            invstate,
            invpc,
            nocp,
            unaligned,
            divbyzero,
            vecttbl,
            forced,
            mmfarvalid,
            mmfar,
            bfarvalid,
            bfar
        );
    }
}
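
// Editorial sketch (not part of the original file): the hardware-stacked
// exception frame that `faulting_stack` points at, written as a struct. The
// field order matches the offsets read in the handler above (ARMv7-M stacking).
#[allow(dead_code)]
#[repr(C)]
struct ExceptionFrameSketch {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}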

/// ARMv7-M hardfault handler.
///
/// For documentation of this function, please see
/// [`CortexMVariant::HARD_FAULT_HANDLER`].
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    use core::arch::naked_asm;
    // We first need to determine whether this is a kernel fault or a userspace
    // fault, and store the unmodified stack pointer. We place these values in
    // registers, then call a non-naked function, to allow for use of Rust code
    // alongside inline asm. Because calling a function consumes stack space, we
    // have to check for a kernel stack overflow and adjust the stack pointer
    // before we branch.
    naked_asm!(
        "
    mov    r2, 0     // r2 = 0
    tst    lr, #4    // test the SPSEL bit (bit 2) of EXC_RETURN in lr
    itte   eq        // if (lr & 4) == 0, run the next two instructions, else the third
    mrseq  r0, msp   // r0 = kernel stack pointer
    addeq  r2, 1     // r2 = 1, kernel was executing
    mrsne  r0, psp   // r0 = userland stack pointer
    // Need to determine if we had a stack overflow before we push anything
    // on to the stack. We check this by looking at the BusFault Status
    // Register's (BFSR) `LSPERR` and `STKERR` bits to see if the hardware
    // had any trouble stacking important registers to the stack during the
    // fault. If so, then we cannot use this stack while handling this fault
    // or we will trigger another fault.
    ldr   r3, =0xE000ED29  // SCB BFSR register address
    ldrb  r3, [r3]         // r3 = BFSR
    tst   r3, #0x30        // r3 = BFSR & 0b00110000; LSPERR & STKERR bits
    ite   ne               // check if the result of that bitwise AND was not 0
    movne r1, #1           // BFSR & 0b00110000 != 0; r1 = 1
    moveq r1, #0           // BFSR & 0b00110000 == 0; r1 = 0
    and r5, r2, r1         // r5 = r2 & r1
    cmp  r5, #1            // update condition codes to reflect whether r1 == 1 && r2 == 1
    itt  eq                // if r5 == 1 run the next two instructions, else skip to the branch
    // If true, the hardware couldn't use the stack, so we have no saved data
    // and we cannot use the kernel stack as is. We just want to report that
    // the kernel's stack overflowed, since that is essential for
    // debugging.
    //
    // To make room for a panic!() handler stack, we just re-use the
    // kernel's original stack. This should in theory leave the bottom
    // of the stack where the problem occurred untouched should one want
    // to further debug.
    ldreq  r4, ={estack} // load _estack into r4
    moveq  sp, r4        // Set the stack pointer to _estack
    // Finally, if the fault occurred in privileged mode (r2 == 1), branch
    // to the non-naked handler.
    cmp r2, #0
    // Per the ARM calling convention, the faulting stack pointer is passed in
    // r0 and whether there was a stack overflow in r1. This function must
    // never return.
    bne {kernel_hard_fault_handler} // branch to kernel hard fault handler
    // Otherwise, the hard fault occurred in userspace. In this case, read
    // the relevant SCB registers:
    ldr r0, =SCB_REGISTERS    // Global variable address
    ldr r1, =0xE000ED14       // SCB CCR register address
    ldr r2, [r1, #0]          // CCR
    str r2, [r0, #0]
    ldr r2, [r1, #20]         // CFSR
    str r2, [r0, #4]
    ldr r2, [r1, #24]         // HFSR
    str r2, [r0, #8]
    ldr r2, [r1, #32]         // MMFAR
    str r2, [r0, #12]
    ldr r2, [r1, #36]         // BFAR
    str r2, [r0, #16]

    ldr r0, =APP_HARD_FAULT  // Global variable address
    mov r1, #1               // r1 = 1
    str r1, [r0, #0]         // APP_HARD_FAULT = 1

    // Set thread mode to privileged
    mov r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by an ISB.
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
    isb

    // The link register is set to the `EXC_RETURN` value on exception
    // entry. To ensure we continue executing in the kernel we ensure the
    // SPSEL bit is set to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    bx lr
        ",
        estack = sym _estack,
        kernel_hard_fault_handler = sym hard_fault_handler_arm_v7m_kernel,
    );
}
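
// Editorial sketch (illustration only; the helper name is invented): the
// stack-overflow test in the assembly above reduces to checking the STKERR
// (bit 4) and LSPERR (bit 5) flags of the BusFault Status Register byte:
#[allow(dead_code)]
fn bfsr_indicates_stacking_fault_sketch(bfsr: u8) -> bool {
    (bfsr & 0x30) != 0 // STKERR | LSPERR: hardware failed to stack the frame
}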

// Table 2.5
// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0553a/CHDBIBGJ.html
pub fn ipsr_isr_number_to_str(isr_number: usize) -> &'static str {
    match isr_number {
        0 => "Thread Mode",
        1 => "Reserved",
        2 => "NMI",
        3 => "HardFault",
        4 => "MemManage",
        5 => "BusFault",
        6 => "UsageFault",
        7..=10 => "Reserved",
        11 => "SVCall",
        12 => "Reserved for Debug",
        13 => "Reserved",
        14 => "PendSV",
        15 => "SysTick",
        16..=255 => "IRQn",
        _ => "(Unknown! Illegal value?)",
    }
}
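
// Editorial sketch: host-runnable sanity checks for the mapping above, using
// exception numbers from the ARMv7-M table referenced in the comment.
#[cfg(test)]
mod ipsr_tests {
    use super::ipsr_isr_number_to_str;

    #[test]
    fn names_match_armv7m_table() {
        assert_eq!(ipsr_isr_number_to_str(3), "HardFault");
        assert_eq!(ipsr_isr_number_to_str(11), "SVCall");
        assert_eq!(ipsr_isr_number_to_str(15), "SysTick");
        assert_eq!(ipsr_isr_number_to_str(42), "IRQn");
        assert_eq!(ipsr_isr_number_to_str(999), "(Unknown! Illegal value?)");
    }
}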

///////////////////////////////////////////////////////////////////
// Mock implementations for running tests on CI.
//
// Since tests run on the local architecture, we have to remove any
// ARM assembly since it will not compile.
///////////////////////////////////////////////////////////////////

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe fn switch_to_user_arm_v7m(
    _user_stack: *const usize,
    _process_regs: &mut [usize; 8],
) -> *const usize {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    unimplemented!()
}