cortexm0/lib.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Shared implementations for ARM Cortex-M0 MCUs.

#![no_std]

use core::fmt::Write;

// Re-export the base generic cortex-m functions here as they are
// valid on cortex-m0.
pub use cortexm::support;

pub use cortexm::nvic;
pub use cortexm::syscall;

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
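/// Registers the hardware pushes onto the stack on exception entry (the
/// ARMv6-M exception frame), in stacking order.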
struct HardFaultStackedRegisters {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Handle a hard fault that occurred in the kernel. This function is invoked
/// by the naked hard_fault_handler function.
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    let hardfault_stacked_registers = HardFaultStackedRegisters {
        r0: *faulting_stack.offset(0),
        r1: *faulting_stack.offset(1),
        r2: *faulting_stack.offset(2),
        r3: *faulting_stack.offset(3),
        r12: *faulting_stack.offset(4),
        lr: *faulting_stack.offset(5),
        pc: *faulting_stack.offset(6),
        xpsr: *faulting_stack.offset(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \txpsr 0x{:x}\r\n\
         ",
        option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
        hardfault_stacked_registers.r0,
        hardfault_stacked_registers.r1,
        hardfault_stacked_registers.r2,
        hardfault_stacked_registers.r3,
        hardfault_stacked_registers.r12,
        hardfault_stacked_registers.lr,
        hardfault_stacked_registers.pc,
        hardfault_stacked_registers.xpsr
    );
}

// Mock implementation for tests on non-ARM hosts (e.g., CI).
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
/// All ISRs are caught by this handler, which disables the interrupt in the
/// NVIC and switches to the kernel.
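///
/// In Rust-like pseudocode, the NVIC bookkeeping below amounts to the
/// following (a sketch only; `ipsr`, `icer`, and `ispr` are hypothetical
/// names standing in for the IPSR register and the memory-mapped
/// `NVIC.ICER`/`NVIC.ISPR` register arrays at `0xE000E180` and `0xE000E200`):
///
/// ```rust,ignore
/// // Sketch of the logic; the real implementation is the assembly below.
/// let irq = (ipsr & 0xff) - 16;     // external IRQs start at exception 16
/// let bit = 1u32 << (irq & 31);
/// icer[(irq / 32) as usize] = bit;  // disable the interrupt in the NVIC
/// ispr[(irq / 32) as usize] = bit;  // re-pend it so it is not lost before
///                                   // service_pending_interrupts() runs
/// ```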
unsafe extern "C" fn generic_isr() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Skip saving process state if not coming from user-space
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    // We need to make sure the kernel continues the execution after this ISR
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb

    // We need the most recent kernel's version of r1, which points
    // to the Process struct's stored registers field. The kernel's r1
    // lives in the second word of the hardware stacked registers on MSP
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0
100: // _generic_isr_no_stacking
    // Find the ISR number by looking at the low byte of the IPSR register
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    // External IRQs start at exception number 16, so subtract 16 to zero-index
    subs r0, r0, #16

    // High level:
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    //
    // r3 = &NVIC.ICER[r0 / 32]
    ldr r2, 101f      // r2 = &NVIC.ICER
    lsrs r3, r0, #5   // r3 = r0 / 32
    lsls r3, r3, #2   // ICER is word-sized, so multiply offset by 4
    adds r3, r3, r2   // r3 = r2 + r3

    // r2 = 1 << (r0 & 31)
    movs r2, #31      // r2 = 31
    ands r0, r2       // r0 = r0 & r2
    subs r2, r2, #30  // r2 = r2 - 30, i.e. r2 = 1
    lsls r2, r2, r0   // r2 = 1 << r0

    // *r3 = r2
    str r2, [r3]

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost
    // in service_pending_interrupts()
    //
    // The NVIC.ISPR base is 0xE000E200, which is 0x20 (aka #32) above the
    // NVIC.ICER base. Calculate the ISPR address by offsetting from the ICER
    // address so as to avoid re-doing the [r0 / 32] index math.
    adds r3, #32
    str r2, [r3]

    bx lr // return here since we have extra words in the assembly

.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}

// Mock implementation for tests on non-ARM hosts (e.g., CI).
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn systick_handler() {
    unimplemented!()
}

/// The `systick_handler` is called when the systick interrupt occurs, signaling
/// that an application executed for longer than its timeslice. This interrupt
/// handler is no longer responsible for signaling to the kernel thread that an
/// interrupt has occurred, but is slightly more efficient than the
/// `generic_isr` handler on account of not needing to mark the interrupt as
/// pending.
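///
/// The handler returns via `EXC_RETURN` value `0xFFFFFFF9`, which resumes
/// Thread mode on the main (kernel) stack; `0xFFFFFFFD` would instead resume
/// on the process stack.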
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn systick_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch to user function where application state
    // is saved and the scheduler can choose what to do next.
    bx   r0
    .align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
        "
    );
}

// Mock implementation for tests on non-ARM hosts (e.g., CI).
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
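/// Handler for the `svc` instruction, which is used both by processes to make
/// system calls and by `switch_to_user()` to enter a process. If the
/// exception came from the kernel (`lr == EXC_RETURN_MSP`), drop to
/// unprivileged thread mode and return to the process stack; otherwise record
/// the system call in `SYSCALL_FIRED` and return to the kernel stack.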
unsafe extern "C" fn svc_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f
    movs r0, #1
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f // EXC_RETURN_MSP
    bx r1

    .align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}

// Mock implementation for tests on non-ARM hosts (e.g., CI).
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}

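/// Handler for hard faults. It inspects the `EXC_RETURN` value in `lr` to
/// determine whether the fault came from a process (PSP) or from the kernel
/// (MSP). An app fault is recorded in `APP_HARD_FAULT` and control returns to
/// the kernel; a kernel fault diverts to `hard_fault_handler_kernel`, which
/// panics.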
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn hard_fault_handler() {
    use core::arch::naked_asm;
    // r1 is set to 1 below when the hard fault occurred while the kernel was
    // executing (on the MSP); it stays 0 when the fault occurred in user
    // space (PSP).
    naked_asm!(
        "
    // Will be incremented to 1 when we determine that the fault occurred
    // in the kernel
    movs r1, #0
    // r2 is used for the EXC_RETURN bit test and r3 is used to store lr
    mov r3, lr

    movs r2, #4
    tst r3, r2
    beq 100f

    // _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard-fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0                           // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f                             // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2                                // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, store that a hardfault occurred in an app, store some CPU
    // state and finally return to the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 // Fault
    str r1, [r0, #0]

    // NOTE:
    // -----
    //
    // Even though the ARMv6-M SCB and CONTROL registers differ from
    // ARMv7-M, their layouts are compatible, so we can keep the same
    // code as ARMv7-M. Note, however, that ARMv6-M does not implement
    // all of the fault status registers read below, so some of the
    // stored values may not be meaningful.

    // Read the SCB registers.
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
    ldr r2, [r1, #0] // CCR
    str r2, [r0, #0]
    ldr r2, [r1, #20] // CFSR
    str r2, [r0, #4]
    ldr r2, [r1, #24] // HFSR
    str r2, [r0, #8]
    ldr r2, [r1, #32] // MMFAR
    str r2, [r0, #12]
    ldr r2, [r1, #36] // BFAR
    str r2, [r0, #16]

    // Set thread mode to privileged
    movs r0, #0
    msr CONTROL, r0
    // No ISB required here: the exception return below acts as a
    // context-synchronizing event.
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html

    // Load the FEXC_RETURN_MSP LR address and return to it, to switch to the
    // kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

    .align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
        ",
        kernel_hard_fault_handler = sym hard_fault_handler_kernel,
    );
}

/// Enum with no variants to ensure that this type is not instantiable. It is
/// only used to pass architecture-specific constants and functions via the
/// `CortexMVariant` trait.
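///
/// A hedged usage sketch (illustrative, not taken from this crate): a chip
/// crate can reference the handlers through the trait's associated constants,
/// for example when populating an interrupt vector table:
///
/// ```rust,ignore
/// use cortexm::CortexMVariant;
/// static IRQS: [unsafe extern "C" fn(); 32] = [CortexM0::GENERIC_ISR; 32];
/// ```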
pub enum CortexM0 {}

impl cortexm::CortexMVariant for CortexM0 {
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    // Mock implementation for tests on non-ARM hosts (e.g., CI).
    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
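    /// Assembly implementation of the context switch: restore the process's
    /// r4-r11 from `process_regs`, point the PSP at `user_stack`, and `svc`
    /// into the process. When the process next traps back into the kernel,
    /// save its registers to `process_regs` and return its current stack
    /// pointer.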
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!(
            "
    // The Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and
    // r9 as clobbers. r6 and r9 are used internally by LLVM, and r7 is used
    // for the frame pointer. However, in the process of restoring and saving
    // the process's registers, we do in fact clobber r6, r7 and r9. So, we
    // work around this by doing our own manual saving of r6 using r2, r7 using
    // r3, r9 using r12, and then mark those as clobbered.
    mov r2, r6
    mov r3, r7
    mov r12, r9

    // Load non-hardware-stacked registers from Process stack
    ldmia r1!, {{r4-r7}}
    mov r11, r7
    mov r10, r6
    mov r9,  r5
    mov r8,  r4
    ldmia r1!, {{r4-r7}}
    subs r1, 32 // Restore pointer to process_regs
                // ldmia! added a 32-byte offset

    // Load bottom of stack into Process Stack Pointer
    msr psp, r0

    // SWITCH
    svc 0xff // It doesn't matter which SVC number we use here

    // Store non-hardware-stacked registers in process_regs
    // r1 still points to process_regs because we are clobbering all
    // non-hardware-stacked registers
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11

    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]

    mrs r0, PSP // PSP into user_stack

    // Manually restore r6, r7 and r9.
    mov r6, r2
    mov r7, r3
    mov r9, r12
            ",
            inout("r0") user_stack,
            in("r1") process_regs,
            out("r2") _,
            out("r3") _,
            out("r4") _,
            out("r5") _,
            out("r8") _,
            out("r10") _,
            out("r11") _,
            out("r12") _,
        );

        user_stack
    }

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        cortexm::print_cortexm_state(writer)
    }
}