// cortexv7m/lib.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2024.

//! Generic support for all Cortex-M platforms.

#![crate_name = "cortexv7m"]
#![crate_type = "rlib"]
#![no_std]

// These constants are defined in the linker script.
extern "C" {
    /// Symbol for the end (top) of the kernel stack, placed by the linker.
    static _estack: u8;
    /// Symbol for the start (bottom) of the kernel stack, placed by the linker.
    static _sstack: u8;
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// ARMv7-M systick handler function.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::SYSTICK_HANDLER`.
    pub fn systick_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .systick_handler_arm_v7m, \"ax\"
    .global systick_handler_arm_v7m
    .thumb_func
  systick_handler_arm_v7m:
    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb                               // synchronization barrier

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // This will resume in the switch_to_user function where application state
    // is saved and the scheduler can choose what to do next.
    bx lr
    "
);

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// Handler of `svc` instructions on ARMv7-M.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::SVC_HANDLER`.
    pub fn svc_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .svc_handler_arm_v7m, \"ax\"
    .global svc_handler_arm_v7m
    .thumb_func
  svc_handler_arm_v7m:
    // First check to see which direction we are going in. If the link register
    // (containing EXC_RETURN) has a 1 in the SPSEL bit (meaning the
    // alternative/process stack was in use) then we are coming from a process
    // which has called a syscall.
    ubfx r0, lr, #2, #1               // r0 = (LR & (0x1<<2)) >> 2
    cmp r0, #0                        // r0 (SPSEL bit) =≟ 0
    bne 100f // to_kernel             // if SPSEL == 1, jump to to_kernel

    // If we get here, then this is a context switch from the kernel to the
    // application. Use the CONTROL register to set the thread mode to
    // unprivileged to run the application.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #1                        // r0 = 1
    msr CONTROL, r0                   // CONTROL = 1
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we execute using the process stack we set the SPSEL bit to 1
    // to use the alternate (process) stack.
    orr lr, lr, #4                    // LR = LR | 0b100

    // Switch to the app.
    bx lr

  100: // to_kernel
    // An application called a syscall. We mark this in the global variable
    // `SYSCALL_FIRED` which is stored in the syscall file.
    // `UserspaceKernelBoundary` will use this variable to decide why the app
    // stopped executing.
    ldr r0, =SYSCALL_FIRED            // r0 = &SYSCALL_FIRED
    mov r1, #1                        // r1 = 1
    str r1, [r0]                      // *SYSCALL_FIRED = 1

    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Return to the kernel.
    bx lr
    "
);

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// Generic interrupt handler for ARMv7-M instruction sets.
    ///
    /// For documentation of this function, see `CortexMVariant::GENERIC_ISR`.
    pub fn generic_isr_arm_v7m();
}
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .generic_isr_arm_v7m, \"ax\"
    .global generic_isr_arm_v7m
    .thumb_func
  generic_isr_arm_v7m:
    // Use the CONTROL register to set the thread mode to privileged to ensure
    // we are executing as the kernel. This may be redundant if the interrupt
    // happened while the kernel code was executing.
    //
    // CONTROL[1]: Stack status
    //   0 = Default stack (MSP) is used
    //   1 = Alternate stack is used
    // CONTROL[0]: Mode
    //   0 = Privileged in thread mode
    //   1 = User state in thread mode
    mov r0, #0                        // r0 = 0
    msr CONTROL, r0                   // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // Now need to disable the interrupt that fired in the NVIC to ensure it
    // does not trigger again before the scheduler has a chance to handle it. We
    // do this here in assembly for performance.
    //
    // The general idea is:
    // 1. Get the index of the interrupt that occurred.
    // 2. Set the disable bit for that interrupt in the NVIC.

    // Find the ISR number (`index`) by looking at the low byte of the IPSR
    // registers.
    mrs r0, IPSR                      // r0 = Interrupt Program Status Register (IPSR)
    and r0, #0xff                     // r0 = r0 & 0xFF; Get lowest 8 bits
    sub r0, #16                       // r0 = r0 - 16;   ISRs start at 16, so subtract 16 to get zero-indexed.

    // Now disable that interrupt in the NVIC.
    // High level:
    //    r0 = index
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    lsrs r2, r0, #5                   // r2 = r0 / 32
    // r0 = 1 << (r0 & 31)
    movs r3, #1                       // r3 = 1
    and r0, r0, #31                   // r0 = r0 & 31
    lsl r0, r3, r0                    // r0 = r3 << r0

    // Load the ICER register address.
    ldr r3, =0xe000e180               // r3 = &NVIC.ICER

    // Here:
    // - `r2` is index / 32
    // - `r3` is &NVIC.ICER
    // - `r0` is 1 << (index & 31)
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost in
    // `service_pending_interrupts()`.
    ldr r3, =0xe000e200               // r3 = &NVIC.ISPR
    str r0, [r3, r2, lsl #2]          // *(r3 + r2 * 4) = r0

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

    // Now we can return from the interrupt context and resume what we were
    // doing. If an app was executing we will switch to the kernel so it can
    // choose whether to service the interrupt.
    bx lr
    "
);

/// Assembly function to switch into userspace and store/restore application
/// state.
///
/// For documentation of this function, please see
/// `CortexMVariant::switch_to_user`.
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
pub unsafe fn switch_to_user_arm_v7m(
    mut user_stack: *const usize,
    process_regs: &mut [usize; 8],
) -> *const usize {
    use core::arch::asm;
    asm!(
    "
    // Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and r9
    // as clobbers. r6 and r9 is used internally by LLVM, and r7 is used for
    // the frame pointer. However, in the process of restoring and saving the
    // process's registers, we do in fact clobber r6, r7 and r9. So, we work
    // around this by doing our own manual saving of r6 using r2, r7 using r3,
    // r9 using r12, and then mark those as clobbered.
    mov r2, r6                        // r2 = r6
    mov r3, r7                        // r3 = r7
    mov r12, r9                       // r12 = r9

    // The arguments passed in are:
    // - `r0` is the bottom of the user stack
    // - `r1` is a reference to `CortexMStoredState.regs`

    // Load bottom of stack into Process Stack Pointer.
    msr psp, r0                       // PSP = r0

    // Load non-hardware-stacked registers from the process stored state. Ensure
    // that the address register (right now r1) is stored in a callee saved
    // register.
    ldmia r1, {{r4-r11}}              // r4 = r1[0], r5 = r1[1], ...

    // Generate a SVC exception to handle the context switch from kernel to
    // userspace. It doesn't matter which SVC number we use here as it is not
    // used in the exception handler. Data being returned from a syscall is
    // transferred on the app's stack.
    svc 0xff

    // When execution returns here we have switched back to the kernel from the
    // application.

    // Push non-hardware-stacked registers into the saved state for the
    // application.
    stmia r1, {{r4-r11}}              // r1[0] = r4, r1[1] = r5, ...

    // Update the user stack pointer with the current value after the
    // application has executed.
    mrs r0, PSP                       // r0 = PSP

    // Need to restore r6, r7 and r12 since we clobbered them when switching to
    // and from the app.
    mov r6, r2                        // r6 = r2
    mov r7, r3                        // r7 = r3
    mov r9, r12                       // r9 = r12
    ",
    inout("r0") user_stack,
    in("r1") process_regs,
    out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _, out("r10") _,
    out("r11") _, out("r12") _);

    user_stack
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Continue the hardfault handler for all hard-faults that occurred
/// during kernel execution. This function must never return.
///
/// `faulting_stack` points at the exception frame the hardware stacked on
/// fault entry; `stack_overflow` is nonzero if the assembly entry stub
/// detected that stacking itself faulted (i.e. the kernel stack overflowed).
unsafe extern "C" fn hard_fault_handler_arm_v7m_kernel(
    faulting_stack: *mut u32,
    stack_overflow: u32,
) -> ! {
    if stack_overflow != 0 {
        // Panic to show the correct error.
        panic!("kernel stack overflow");
    } else {
        // Show the normal kernel hardfault message.
        //
        // Read the registers the hardware stacked on exception entry
        // (r0-r3, r12, lr, pc, xPSR, in that order).
        let stacked_r0: u32 = *faulting_stack.offset(0);
        let stacked_r1: u32 = *faulting_stack.offset(1);
        let stacked_r2: u32 = *faulting_stack.offset(2);
        let stacked_r3: u32 = *faulting_stack.offset(3);
        let stacked_r12: u32 = *faulting_stack.offset(4);
        let stacked_lr: u32 = *faulting_stack.offset(5);
        let stacked_pc: u32 = *faulting_stack.offset(6);
        let stacked_xpsr: u32 = *faulting_stack.offset(7);

        let mode_str = "Kernel";

        // Read the relevant System Control Block fault status registers.
        let shcsr: u32 = core::ptr::read_volatile(0xE000ED24 as *const u32); // System Handler Control and State
        let cfsr: u32 = core::ptr::read_volatile(0xE000ED28 as *const u32); // Configurable Fault Status
        let hfsr: u32 = core::ptr::read_volatile(0xE000ED2C as *const u32); // HardFault Status
        let mmfar: u32 = core::ptr::read_volatile(0xE000ED34 as *const u32); // MemManage Fault Address
        let bfar: u32 = core::ptr::read_volatile(0xE000ED38 as *const u32); // BusFault Address

        // CFSR[7:0]: MemManage Fault Status Register (MMFSR).
        let iaccviol = (cfsr & 0x01) == 0x01;
        let daccviol = (cfsr & 0x02) == 0x02;
        let munstkerr = (cfsr & 0x08) == 0x08;
        let mstkerr = (cfsr & 0x10) == 0x10;
        let mlsperr = (cfsr & 0x20) == 0x20;
        let mmfarvalid = (cfsr & 0x80) == 0x80;

        // CFSR[15:8]: BusFault Status Register (BFSR).
        let ibuserr = ((cfsr >> 8) & 0x01) == 0x01;
        let preciserr = ((cfsr >> 8) & 0x02) == 0x02;
        let impreciserr = ((cfsr >> 8) & 0x04) == 0x04;
        let unstkerr = ((cfsr >> 8) & 0x08) == 0x08;
        let stkerr = ((cfsr >> 8) & 0x10) == 0x10;
        let lsperr = ((cfsr >> 8) & 0x20) == 0x20;
        let bfarvalid = ((cfsr >> 8) & 0x80) == 0x80;

        // CFSR[31:16]: UsageFault Status Register (UFSR).
        let undefinstr = ((cfsr >> 16) & 0x01) == 0x01;
        let invstate = ((cfsr >> 16) & 0x02) == 0x02;
        let invpc = ((cfsr >> 16) & 0x04) == 0x04;
        let nocp = ((cfsr >> 16) & 0x08) == 0x08;
        let unaligned = ((cfsr >> 16) & 0x100) == 0x100;
        let divbyzero = ((cfsr >> 16) & 0x200) == 0x200;

        // HFSR bits.
        let vecttbl = (hfsr & 0x02) == 0x02;
        let forced = (hfsr & 0x40000000) == 0x40000000;

        // Decompose the stacked xPSR: ICI/IT bits, Thumb bit, and the
        // exception number (IPSR field).
        let ici_it = (((stacked_xpsr >> 25) & 0x3) << 6) | ((stacked_xpsr >> 10) & 0x3f);
        let thumb_bit = ((stacked_xpsr >> 24) & 0x1) == 1;
        let exception_number = (stacked_xpsr & 0x1ff) as usize;

        panic!(
            "{} HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \tpsr 0x{:x} [ N {} Z {} C {} V {} Q {} GE {}{}{}{} ; ICI.IT {} T {} ; Exc {}-{} ]\r\n\
         \tsp  0x{:x}\r\n\
         \ttop of stack     0x{:x}\r\n\
         \tbottom of stack  0x{:x}\r\n\
         \tSHCSR 0x{:x}\r\n\
         \tCFSR  0x{:x}\r\n\
         \tHFSR  0x{:x}\r\n\
         \tInstruction Access Violation:       {}\r\n\
         \tData Access Violation:              {}\r\n\
         \tMemory Management Unstacking Fault: {}\r\n\
         \tMemory Management Stacking Fault:   {}\r\n\
         \tMemory Management Lazy FP Fault:    {}\r\n\
         \tInstruction Bus Error:              {}\r\n\
         \tPrecise Data Bus Error:             {}\r\n\
         \tImprecise Data Bus Error:           {}\r\n\
         \tBus Unstacking Fault:               {}\r\n\
         \tBus Stacking Fault:                 {}\r\n\
         \tBus Lazy FP Fault:                  {}\r\n\
         \tUndefined Instruction Usage Fault:  {}\r\n\
         \tInvalid State Usage Fault:          {}\r\n\
         \tInvalid PC Load Usage Fault:        {}\r\n\
         \tNo Coprocessor Usage Fault:         {}\r\n\
         \tUnaligned Access Usage Fault:       {}\r\n\
         \tDivide By Zero:                     {}\r\n\
         \tBus Fault on Vector Table Read:     {}\r\n\
         \tForced Hard Fault:                  {}\r\n\
         \tFaulting Memory Address: (valid: {}) {:#010X}\r\n\
         \tBus Fault Address:       (valid: {}) {:#010X}\r\n\
         ",
            mode_str,
            option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
            stacked_r0,
            stacked_r1,
            stacked_r2,
            stacked_r3,
            stacked_r12,
            stacked_lr,
            stacked_pc,
            stacked_xpsr,
            (stacked_xpsr >> 31) & 0x1,
            (stacked_xpsr >> 30) & 0x1,
            (stacked_xpsr >> 29) & 0x1,
            (stacked_xpsr >> 28) & 0x1,
            (stacked_xpsr >> 27) & 0x1,
            (stacked_xpsr >> 19) & 0x1,
            (stacked_xpsr >> 18) & 0x1,
            (stacked_xpsr >> 17) & 0x1,
            (stacked_xpsr >> 16) & 0x1,
            ici_it,
            thumb_bit,
            exception_number,
            ipsr_isr_number_to_str(exception_number),
            faulting_stack as u32,
            core::ptr::addr_of!(_estack) as u32,
            core::ptr::addr_of!(_sstack) as u32,
            shcsr,
            cfsr,
            hfsr,
            iaccviol,
            daccviol,
            munstkerr,
            mstkerr,
            mlsperr,
            ibuserr,
            preciserr,
            impreciserr,
            unstkerr,
            stkerr,
            lsperr,
            undefinstr,
            invstate,
            invpc,
            nocp,
            unaligned,
            divbyzero,
            vecttbl,
            forced,
            mmfarvalid,
            mmfar,
            bfarvalid,
            bfar
        );
    }
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    /// ARMv7-M hardfault handler.
    ///
    /// For documentation of this function, please see
    /// `CortexMVariant::HARD_FAULT_HANDLER`.
    pub fn hard_fault_handler_arm_v7m();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
// First need to determine if this is a kernel fault or a userspace fault, and
// store the unmodified stack pointer. Place these values in registers, then
// call a non-naked function, to allow for use of rust code alongside inline
// asm. Because calling a function increases the stack pointer, we have to
// check for a kernel stack overflow and adjust the stack pointer before we
// branch.
core::arch::global_asm!(
    "
        .section .hard_fault_handler_arm_v7m, \"ax\"
        .global hard_fault_handler_arm_v7m
        .thumb_func
    hard_fault_handler_arm_v7m:
        mov    r2, 0     // r2 = 0
        tst    lr, #4    // test bit 2 (SPSEL) of the EXC_RETURN value in lr
        itte   eq        // if (lr & 0x4) == 0, run next two instructions, else run the 3rd instruction.
        mrseq  r0, msp   // r0 = kernel stack pointer
        addeq  r2, 1     // r2 = 1, kernel was executing
        mrsne  r0, psp   // r0 = userland stack pointer
        // Need to determine if we had a stack overflow before we push anything
        // on to the stack. We check this by looking at the BusFault Status
        // Register's (BFSR) `LSPERR` and `STKERR` bits to see if the hardware
        // had any trouble stacking important registers to the stack during the
        // fault. If so, then we cannot use this stack while handling this fault
        // or we will trigger another fault.
        ldr   r3, =0xE000ED29  // SCB BFSR register address
        ldrb  r3, [r3]         // r3 = BFSR
        tst   r3, #0x30        // r3 = BFSR & 0b00110000; LSPERR & STKERR bits
        ite   ne               // check if the result of that bitwise AND was not 0
        movne r1, #1           // BFSR & 0b00110000 != 0; r1 = 1
        moveq r1, #0           // BFSR & 0b00110000 == 0; r1 = 0
        and r5, r2, r1         // bitwise and r1 and r2, store in r5
        cmp  r5, #1            //  update condition codes to reflect if r1 == 1 && r2 == 1
        itt  eq                // if r5==1 run the next 2 instructions, else skip to branch
        // if true, the hardware couldn't use the stack, so we have no saved data and
        // we cannot use the kernel stack as is. We just want to report that
        // the kernel's stack overflowed, since that is essential for
        // debugging.
        //
        // To make room for a panic!() handler stack, we just re-use the
        // kernel's original stack. This should in theory leave the bottom
        // of the stack where the problem occurred untouched should one want
        // to further debug.
        ldreq  r4, ={estack} // load _estack into r4
        moveq  sp, r4        // Set the stack pointer to _estack
        // finally, if the fault occurred in privileged mode (r2 == 1), branch
        // to non-naked handler.
        cmp r2, #0
        // Per ARM calling convention, faulting stack is passed in r0, whether
        // there was a stack overflow in r1. This function must never return.
        bne {kernel_hard_fault_handler} // branch to kernel hard fault handler
        // Otherwise, the hard fault occurred in userspace. In this case, read
        // the relevant SCB registers:
        ldr r0, =SCB_REGISTERS    // Global variable address
        ldr r1, =0xE000ED14       // SCB CCR register address
        ldr r2, [r1, #0]          // CCR
        str r2, [r0, #0]
        ldr r2, [r1, #20]         // CFSR
        str r2, [r0, #4]
        ldr r2, [r1, #24]         // HFSR
        str r2, [r0, #8]
        ldr r2, [r1, #32]         // MMFAR
        str r2, [r0, #12]
        ldr r2, [r1, #36]         // BFAR
        str r2, [r0, #16]

        ldr r0, =APP_HARD_FAULT  // Global variable address
        mov r1, #1               // r1 = 1
        str r1, [r0, #0]         // APP_HARD_FAULT = 1

        // Set thread mode to privileged
        mov r0, #0
        msr CONTROL, r0
        // CONTROL writes must be followed by ISB
        // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
        isb

        // The link register is set to the `EXC_RETURN` value on exception
        // entry. To ensure we continue executing in the kernel we ensure the
        // SPSEL bit is set to 0 to use the main (kernel) stack.
        bfc lr, #2, #1                    // LR = LR & !(0x1<<2)

        bx lr",
    estack = sym _estack,
    kernel_hard_fault_handler = sym hard_fault_handler_arm_v7m_kernel,
);

// Table 2.5
// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0553a/CHDBIBGJ.html
/// Map an IPSR exception number to a human-readable exception name,
/// per the ARMv7-M exception number table.
pub fn ipsr_isr_number_to_str(isr_number: usize) -> &'static str {
    match isr_number {
        0 => "Thread Mode",
        1 => "Reserved",
        2 => "NMI",
        3 => "HardFault",
        4 => "MemManage",
        5 => "BusFault",
        6 => "UsageFault",
        7..=10 => "Reserved",
        11 => "SVCall",
        12 => "Reserved for Debug",
        13 => "Reserved",
        14 => "PendSV",
        15 => "SysTick",
        16..=255 => "IRQn",
        _ => "(Unknown! Illegal value?)",
    }
}

///////////////////////////////////////////////////////////////////
// Mock implementations for running tests on CI.
//
// Since tests run on the local architecture, we have to remove any
// ARM assembly since it will not compile.
///////////////////////////////////////////////////////////////////

// Mock for non-ARM builds (e.g. CI test runs); never called at runtime.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    unimplemented!()
}

// Mock for non-ARM builds (e.g. CI test runs); never called at runtime.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    unimplemented!()
}

// Mock for non-ARM builds (e.g. CI test runs); never called at runtime.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    unimplemented!()
}

// Mock for non-ARM builds (e.g. CI test runs); never called at runtime.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn switch_to_user_arm_v7m(
    _user_stack: *const u8,
    _process_regs: &mut [usize; 8],
) -> *const usize {
    unimplemented!()
}

// Mock for non-ARM builds (e.g. CI test runs); never called at runtime.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    unimplemented!()
}