// cortexv7m/lib.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2024.

//! Generic support for all Cortex-M platforms.

#![no_std]
// These constants are defined in the linker script.
extern "C" {
    // Top (highest address) of the kernel stack; used below as "top of stack"
    // in the hard-fault diagnostic output, and re-used as a panic stack when
    // the kernel stack overflows.
    static _estack: u8;
    // Bottom (lowest address) of the kernel stack region; reported as
    // "bottom of stack" in the hard-fault diagnostic output.
    static _sstack: u8;
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// ARMv7-M systick handler function.
    ///
    /// The symbol is defined by the `global_asm!` block below.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::SYSTICK_HANDLER`.
    pub fn systick_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
// SysTick exception handler. It forces execution back into the privileged
// kernel on the main (MSP) stack; control then resumes inside
// `switch_to_user_arm_v7m`, right after its `svc` instruction.
core::arch::global_asm!(
    "
    .section .systick_handler_arm_v7m, \"ax\"
    .global systick_handler_arm_v7m
    .thumb_func
  systick_handler_arm_v7m:
    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb                               // synchronization barrier

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // This will resume in the switch_to_user function where application state
    // is saved and the scheduler can choose what to do next.
    bx lr
    "
);

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// Handler of `svc` instructions on ARMv7-M.
    ///
    /// The symbol is defined by the `global_asm!` block below.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::SVC_HANDLER`.
    pub fn svc_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
// SVC handler, covering both directions of a context switch. An `svc` issued
// by the kernel (in `switch_to_user_arm_v7m`) drops to unprivileged execution
// on the process (PSP) stack; an `svc` issued by a process (a syscall) sets
// the `SYSCALL_FIRED` global and returns to privileged execution on the main
// (MSP) kernel stack. The direction is decided from the SPSEL bit of the
// `EXC_RETURN` value in LR.
core::arch::global_asm!(
    "
    .section .svc_handler_arm_v7m, \"ax\"
    .global svc_handler_arm_v7m
    .thumb_func
  svc_handler_arm_v7m:
    // First check to see which direction we are going in. If the link register
    // (containing EXC_RETURN) has a 1 in the SPSEL bit (meaning the
    // alternative/process stack was in use) then we are coming from a process
    // which has called a syscall.
    ubfx r0, lr, #2, #1               // r0 = (LR & (0x1<<2)) >> 2
    cmp r0, #0                        // r0 (SPSEL bit) =≟ 0
    bne 100f // to_kernel             // if SPSEL == 1, jump to to_kernel

    // If we get here, then this is a context switch from the kernel to the
    // application. Use the CONTROL register to set the thread mode to
    // unprivileged to run the application.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #1                        // r0 = 1
    msr CONTROL, r0                   // CONTROL = 1
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we execute using the process stack we set the SPSEL bit to 1
    // to use the alternate (process) stack.
    orr lr, lr, #4                    // LR = LR | 0b100

    // Switch to the app.
    bx lr

  100: // to_kernel
    // An application called a syscall. We mark this in the global variable
    // `SYSCALL_FIRED` which is stored in the syscall file.
    // `UserspaceKernelBoundary` will use this variable to decide why the app
    // stopped executing.
    ldr r0, =SYSCALL_FIRED            // r0 = &SYSCALL_FIRED
    mov r1, #1                        // r1 = 1
    str r1, [r0]                      // *SYSCALL_FIRED = 1

    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Return to the kernel.
    bx lr
    "
);

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// Generic interrupt handler for ARMv7-M instruction sets.
    ///
    /// The symbol is defined by the `global_asm!` block below.
    ///
    /// For documentation of this function, see `CortexMVariant::GENERIC_ISR`.
    pub fn generic_isr_arm_v7m();
}
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
// Generic interrupt handler: masks the firing interrupt in the NVIC (ICER),
// re-pends it (ISPR) so it is not lost before the kernel services it, and
// returns to privileged kernel execution on the main (MSP) stack.
core::arch::global_asm!(
        "
    .section .generic_isr_arm_v7m, \"ax\"
    .global generic_isr_arm_v7m
    .thumb_func
  generic_isr_arm_v7m:
    // Use the CONTROL register to set the thread mode to privileged to ensure
    // we are executing as the kernel. This may be redundant if the interrupt
    // happened while the kernel code was executing.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // Now need to disable the interrupt that fired in the NVIC to ensure it
    // does not trigger again before the scheduler has a chance to handle it. We
    // do this here in assembly for performance.
    //
    // The general idea is:
    // 1. Get the index of the interrupt that occurred.
    // 2. Set the disable bit for that interrupt in the NVIC.

    // Find the ISR number (`index`) by looking at the low byte of the IPSR
    // registers.
    mrs r0, IPSR                      // r0 = Interrupt Program Status Register (IPSR)
    and r0, #0xff                     // r0 = r0 & 0xFF; Get lowest 8 bits
    sub r0, #16                       // r0 = r0 - 16;   ISRs start at 16, so subtract 16 to get zero-indexed.

    // Now disable that interrupt in the NVIC.
    // High level:
    //    r0 = index
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    lsrs r2, r0, #5                   // r2 = r0 / 32
    // r0 = 1 << (r0 & 31)
    movs r3, #1                       // r3 = 1
    and r0, r0, #31                   // r0 = r0 & 31
    lsl r0, r3, r0                    // r0 = r3 << r0

    // Load the ICER register address.
    ldr r3, =0xe000e180               // r3 = &NVIC.ICER

    // Here:
    // - `r2` is index / 32
    // - `r3` is &NVIC.ICER
    // - `r0` is 1 << (index & 31)
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost in
    // `service_pending_interrupts()`.
    ldr r3, =0xe000e200               // r3 = &NVIC.ISPR
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Now we can return from the interrupt context and resume what we were
    // doing. If an app was executing we will switch to the kernel so it can
    // choose whether to service the interrupt.
    bx lr
    ");

/// Assembly function to switch into userspace and store/restore application
/// state.
///
/// Returns the process's stack pointer (PSP) as it was when the application
/// stopped executing.
///
/// For documentation of this function, please see
/// `CortexMVariant::switch_to_user`.
///
/// # Safety
///
/// `user_stack` must point to a valid process stack prepared for exception
/// return, and `process_regs` must hold the process's saved r4-r11 values
/// (they are loaded with `ldmia` before the switch and stored back with
/// `stmia` after) — NOTE(review): inferred from the asm below; confirm
/// against `CortexMVariant::switch_to_user`'s contract.
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
pub unsafe fn switch_to_user_arm_v7m(
    mut user_stack: *const usize,
    process_regs: &mut [usize; 8],
) -> *const usize {
    use core::arch::asm;
    asm!(
    "
    // Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and r9
    // as clobbers. r6 and r9 is used internally by LLVM, and r7 is used for
    // the frame pointer. However, in the process of restoring and saving the
    // process's registers, we do in fact clobber r6, r7 and r9. So, we work
    // around this by doing our own manual saving of r6 using r2, r7 using r3,
    // r9 using r12, and then mark those as clobbered.
    mov r2, r6                        // r2 = r6
    mov r3, r7                        // r3 = r7
    mov r12, r9                       // r12 = r9

    // The arguments passed in are:
    // - `r0` is the bottom of the user stack
    // - `r1` is a reference to `CortexMStoredState.regs`

    // Load bottom of stack into Process Stack Pointer.
    msr psp, r0                       // PSP = r0

    // Load non-hardware-stacked registers from the process stored state. Ensure
    // that the address register (right now r1) is stored in a callee saved
    // register.
    ldmia r1, {{r4-r11}}              // r4 = r1[0], r5 = r1[1], ...

    // Generate a SVC exception to handle the context switch from kernel to
    // userspace. It doesn't matter which SVC number we use here as it is not
    // used in the exception handler. Data being returned from a syscall is
    // transferred on the app's stack.
    svc 0xff

    // When execution returns here we have switched back to the kernel from the
    // application.

    // Push non-hardware-stacked registers into the saved state for the
    // application.
    stmia r1, {{r4-r11}}              // r1[0] = r4, r1[1] = r5, ...

    // Update the user stack pointer with the current value after the
    // application has executed.
    mrs r0, PSP                       // r0 = PSP

    // Need to restore r6, r7 and r12 since we clobbered them when switching to
    // and from the app.
    mov r6, r2                        // r6 = r2
    mov r7, r3                        // r7 = r3
    mov r9, r12                       // r9 = r12
    ",
    inout("r0") user_stack,
    in("r1") process_regs,
    out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _, out("r10") _,
    out("r11") _, out("r12") _);

    user_stack
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Continue the hardfault handler for all hard-faults that occurred
/// during kernel execution. This function must never return.
///
/// `faulting_stack` points to the exception frame the hardware stacked on
/// exception entry (r0-r3, r12, lr, pc, xPSR). `stack_overflow` is nonzero
/// when the assembly shim detected that the hardware could not stack that
/// frame (kernel stack overflow), in which case the frame must not be read.
unsafe extern "C" fn hard_fault_handler_arm_v7m_kernel(
    faulting_stack: *mut u32,
    stack_overflow: u32,
) -> ! {
    if stack_overflow != 0 {
        // Panic to show the correct error.
        panic!("kernel stack overflow");
    } else {
        // Show the normal kernel hardfault message.
        //
        // Read the registers the hardware pushed on exception entry, in their
        // architecturally defined stacking order.
        let stacked_r0: u32 = *faulting_stack.offset(0);
        let stacked_r1: u32 = *faulting_stack.offset(1);
        let stacked_r2: u32 = *faulting_stack.offset(2);
        let stacked_r3: u32 = *faulting_stack.offset(3);
        let stacked_r12: u32 = *faulting_stack.offset(4);
        let stacked_lr: u32 = *faulting_stack.offset(5);
        let stacked_pc: u32 = *faulting_stack.offset(6);
        let stacked_xpsr: u32 = *faulting_stack.offset(7);

        let mode_str = "Kernel";

        // System Control Block fault-status registers.
        let shcsr: u32 = core::ptr::read_volatile(0xE000ED24 as *const u32); // System Handler Control and State
        let cfsr: u32 = core::ptr::read_volatile(0xE000ED28 as *const u32); // Configurable Fault Status
        let hfsr: u32 = core::ptr::read_volatile(0xE000ED2C as *const u32); // HardFault Status
        let mmfar: u32 = core::ptr::read_volatile(0xE000ED34 as *const u32); // MemManage Fault Address
        let bfar: u32 = core::ptr::read_volatile(0xE000ED38 as *const u32); // BusFault Address

        // MemManage fault status: CFSR[7:0].
        let iaccviol = (cfsr & 0x01) == 0x01;
        let daccviol = (cfsr & 0x02) == 0x02;
        let munstkerr = (cfsr & 0x08) == 0x08;
        let mstkerr = (cfsr & 0x10) == 0x10;
        let mlsperr = (cfsr & 0x20) == 0x20;
        let mmfarvalid = (cfsr & 0x80) == 0x80;

        // BusFault status: CFSR[15:8].
        let ibuserr = ((cfsr >> 8) & 0x01) == 0x01;
        let preciserr = ((cfsr >> 8) & 0x02) == 0x02;
        let impreciserr = ((cfsr >> 8) & 0x04) == 0x04;
        let unstkerr = ((cfsr >> 8) & 0x08) == 0x08;
        let stkerr = ((cfsr >> 8) & 0x10) == 0x10;
        let lsperr = ((cfsr >> 8) & 0x20) == 0x20;
        let bfarvalid = ((cfsr >> 8) & 0x80) == 0x80;

        // UsageFault status: CFSR[31:16].
        let undefinstr = ((cfsr >> 16) & 0x01) == 0x01;
        let invstate = ((cfsr >> 16) & 0x02) == 0x02;
        let invpc = ((cfsr >> 16) & 0x04) == 0x04;
        let nocp = ((cfsr >> 16) & 0x08) == 0x08;
        let unaligned = ((cfsr >> 16) & 0x100) == 0x100;
        let divbyzero = ((cfsr >> 16) & 0x200) == 0x200;

        let vecttbl = (hfsr & 0x02) == 0x02;
        let forced = (hfsr & 0x40000000) == 0x40000000;

        // Decode the stacked xPSR: ICI/IT bits, Thumb bit, and the exception
        // number active when the fault occurred.
        let ici_it = (((stacked_xpsr >> 25) & 0x3) << 6) | ((stacked_xpsr >> 10) & 0x3f);
        let thumb_bit = ((stacked_xpsr >> 24) & 0x1) == 1;
        let exception_number = (stacked_xpsr & 0x1ff) as usize;

        panic!(
            "{} HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \tpsr 0x{:x} [ N {} Z {} C {} V {} Q {} GE {}{}{}{} ; ICI.IT {} T {} ; Exc {}-{} ]\r\n\
         \tsp  0x{:x}\r\n\
         \ttop of stack     0x{:x}\r\n\
         \tbottom of stack  0x{:x}\r\n\
         \tSHCSR 0x{:x}\r\n\
         \tCFSR  0x{:x}\r\n\
         \tHFSR  0x{:x}\r\n\
         \tInstruction Access Violation:       {}\r\n\
         \tData Access Violation:              {}\r\n\
         \tMemory Management Unstacking Fault: {}\r\n\
         \tMemory Management Stacking Fault:   {}\r\n\
         \tMemory Management Lazy FP Fault:    {}\r\n\
         \tInstruction Bus Error:              {}\r\n\
         \tPrecise Data Bus Error:             {}\r\n\
         \tImprecise Data Bus Error:           {}\r\n\
         \tBus Unstacking Fault:               {}\r\n\
         \tBus Stacking Fault:                 {}\r\n\
         \tBus Lazy FP Fault:                  {}\r\n\
         \tUndefined Instruction Usage Fault:  {}\r\n\
         \tInvalid State Usage Fault:          {}\r\n\
         \tInvalid PC Load Usage Fault:        {}\r\n\
         \tNo Coprocessor Usage Fault:         {}\r\n\
         \tUnaligned Access Usage Fault:       {}\r\n\
         \tDivide By Zero:                     {}\r\n\
         \tBus Fault on Vector Table Read:     {}\r\n\
         \tForced Hard Fault:                  {}\r\n\
         \tFaulting Memory Address: (valid: {}) {:#010X}\r\n\
         \tBus Fault Address:       (valid: {}) {:#010X}\r\n\
         ",
            mode_str,
            option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
            stacked_r0,
            stacked_r1,
            stacked_r2,
            stacked_r3,
            stacked_r12,
            stacked_lr,
            stacked_pc,
            stacked_xpsr,
            (stacked_xpsr >> 31) & 0x1,
            (stacked_xpsr >> 30) & 0x1,
            (stacked_xpsr >> 29) & 0x1,
            (stacked_xpsr >> 28) & 0x1,
            (stacked_xpsr >> 27) & 0x1,
            (stacked_xpsr >> 19) & 0x1,
            (stacked_xpsr >> 18) & 0x1,
            (stacked_xpsr >> 17) & 0x1,
            (stacked_xpsr >> 16) & 0x1,
            ici_it,
            thumb_bit,
            exception_number,
            ipsr_isr_number_to_str(exception_number),
            faulting_stack as u32,
            core::ptr::addr_of!(_estack) as u32,
            core::ptr::addr_of!(_sstack) as u32,
            shcsr,
            cfsr,
            hfsr,
            iaccviol,
            daccviol,
            munstkerr,
            mstkerr,
            mlsperr,
            ibuserr,
            preciserr,
            impreciserr,
            unstkerr,
            stkerr,
            lsperr,
            undefinstr,
            invstate,
            invpc,
            nocp,
            unaligned,
            divbyzero,
            vecttbl,
            forced,
            mmfarvalid,
            mmfar,
            bfarvalid,
            bfar
        );
    }
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// ARMv7-M hardfault handler.
    ///
    /// The symbol is defined by the `global_asm!` block below.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::HARD_FAULT_HANDLER`.
    pub fn hard_fault_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
// First need to determine if this a kernel fault or a userspace fault, and store
// the unmodified stack pointer. Place these values in registers, then call
// a non-naked function, to allow for use of rust code alongside inline asm.
// Because calling a function increases the stack pointer, we have to check for a kernel
// stack overflow and adjust the stack pointer before we branch.
//
// For a kernel fault this tail-calls `hard_fault_handler_arm_v7m_kernel`
// (which never returns). For a userspace fault it copies the SCB fault
// registers into the `SCB_REGISTERS` global, sets `APP_HARD_FAULT`, and
// returns to the kernel on the main (MSP) stack.
core::arch::global_asm!(
    "
        .section .hard_fault_handler_arm_v7m, \"ax\"
        .global hard_fault_handler_arm_v7m
        .thumb_func
    hard_fault_handler_arm_v7m:
        mov    r2, 0     // r2 = 0
        tst    lr, #4    // bitwise AND link register to 0b100
        itte   eq        // if lr==4, run next two instructions, else, run 3rd instruction.
        mrseq  r0, msp   // r0 = kernel stack pointer
        addeq  r2, 1     // r2 = 1, kernel was executing
        mrsne  r0, psp   // r0 = userland stack pointer
        // Need to determine if we had a stack overflow before we push anything
        // on to the stack. We check this by looking at the BusFault Status
        // Register's (BFSR) `LSPERR` and `STKERR` bits to see if the hardware
        // had any trouble stacking important registers to the stack during the
        // fault. If so, then we cannot use this stack while handling this fault
        // or we will trigger another fault.
        ldr   r3, =0xE000ED29  // SCB BFSR register address
        ldrb  r3, [r3]         // r3 = BFSR
        tst   r3, #0x30        // r3 = BFSR & 0b00110000; LSPERR & STKERR bits
        ite   ne               // check if the result of that bitwise AND was not 0
        movne r1, #1           // BFSR & 0b00110000 != 0; r1 = 1
        moveq r1, #0           // BFSR & 0b00110000 == 0; r1 = 0
        and r5, r2, r1         // bitwise and r1 and r2, store in r5
        cmp  r5, #1            //  update condition codes to reflect if r1 == 1 && r2 == 1
        itt  eq                // if r5==1 run the next 2 instructions, else skip to branch
        // if true, The hardware couldn't use the stack, so we have no saved data and
        // we cannot use the kernel stack as is. We just want to report that
        // the kernel's stack overflowed, since that is essential for
        // debugging.
        //
        // To make room for a panic!() handler stack, we just re-use the
        // kernel's original stack. This should in theory leave the bottom
        // of the stack where the problem occurred untouched should one want
        // to further debug.
        ldreq  r4, ={estack} // load _estack into r4
        moveq  sp, r4        // Set the stack pointer to _estack
        // finally, if the fault occurred in privileged mode (r2 == 1), branch
        // to non-naked handler.
        cmp r2, #0
        // Per ARM calling convention, faulting stack is passed in r0, whether
        // there was a stack overflow in r1. This function must never return.
        bne {kernel_hard_fault_handler} // branch to kernel hard fault handler
        // Otherwise, the hard fault occurred in userspace. In this case, read
        // the relevant SCB registers:
        ldr r0, =SCB_REGISTERS    // Global variable address
        ldr r1, =0xE000ED14       // SCB CCR register address
        ldr r2, [r1, #0]          // CCR
        str r2, [r0, #0]
        ldr r2, [r1, #20]         // CFSR
        str r2, [r0, #4]
        ldr r2, [r1, #24]         // HFSR
        str r2, [r0, #8]
        ldr r2, [r1, #32]         // MMFAR
        str r2, [r0, #12]
        ldr r2, [r1, #36]         // BFAR
        str r2, [r0, #16]

        ldr r0, =APP_HARD_FAULT  // Global variable address
        mov r1, #1               // r1 = 1
        str r1, [r0, #0]         // APP_HARD_FAULT = 1

        // Set thread mode to privileged
        mov r0, #0
        msr CONTROL, r0
        // CONTROL writes must be followed by ISB
        // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
        isb

        // The link register is set to the `EXC_RETURN` value on exception
        // entry. To ensure we continue executing in the kernel we ensure the
        // SPSEL bit is set to 0 to use the main (kernel) stack.
        bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

        bx lr",
    estack = sym _estack,
    kernel_hard_fault_handler = sym hard_fault_handler_arm_v7m_kernel,
);

/// Map an IPSR exception number to a human-readable exception name.
///
/// Numbers 16 and above are external interrupts ("IRQn"); anything past 255
/// cannot come from the 9-bit IPSR field and is reported as illegal.
///
/// See Table 2.5:
/// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0553a/CHDBIBGJ.html
pub fn ipsr_isr_number_to_str(isr_number: usize) -> &'static str {
    match isr_number {
        0 => "Thread Mode",
        2 => "NMI",
        3 => "HardFault",
        4 => "MemManage",
        5 => "BusFault",
        6 => "UsageFault",
        11 => "SVCall",
        12 => "Reserved for Debug",
        14 => "PendSV",
        15 => "SysTick",
        // The architecture reserves the remaining low exception numbers.
        1 | 7..=10 | 13 => "Reserved",
        16..=255 => "IRQn",
        _ => "(Unknown! Illegal value?)",
    }
}

///////////////////////////////////////////////////////////////////
// Mock implementations for running tests on CI.
//
// Since tests run on the local architecture, we have to remove any
// ARM assembly since it will not compile.
///////////////////////////////////////////////////////////////////

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// CI/host-build stand-in for the assembly `systick_handler_arm_v7m`.
/// Panics if ever called.
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// CI/host-build stand-in for the assembly `svc_handler_arm_v7m`.
/// Panics if ever called.
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// CI/host-build stand-in for the assembly `generic_isr_arm_v7m`.
/// Panics if ever called.
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// CI/host-build stand-in for `switch_to_user_arm_v7m`.
/// Panics if ever called.
pub unsafe extern "C" fn switch_to_user_arm_v7m(
    _user_stack: *const u8,
    _process_regs: &mut [usize; 8],
) -> *const usize {
    unimplemented!()
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// CI/host-build stand-in for the assembly `hard_fault_handler_arm_v7m`.
/// Panics if ever called.
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    unimplemented!()
}