// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Shared implementations for ARM Cortex-M0 MCUs.

#![no_std]

use core::fmt::Write;

// Re-export the base generic cortex-m functions here as they are
// valid on cortex-m0.
pub use cortexm::support;

pub use cortexm::nvic;
pub use cortexm::syscall;
pub use cortexm::thread_id;
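
// Quick reference for the EXC_RETURN values used by the handlers below
// (encoding per the ARMv6-M Architecture Reference Manual): bit 2 of the
// value in lr on exception entry selects the stack that the hardware pushed
// the exception frame onto.
//
//     0xFFFFFFF9 -> return to Thread mode, frame on MSP (kernel stack)
//     0xFFFFFFFD -> return to Thread mode, frame on PSP (process stack)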
19#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
20struct HardFaultStackedRegisters {
21    r0: u32,
22    r1: u32,
23    r2: u32,
24    r3: u32,
25    r12: u32,
26    lr: u32,
27    pc: u32,
28    xpsr: u32,
29}
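
// Reading aid (assuming the standard ARMv6-M exception entry behavior): the
// hardware pushes these eight words onto the active stack on exception entry,
// so the `faulting_stack.offset(n)` reads below index the frame in this order:
//
//     sp+0x00 r0    sp+0x04 r1    sp+0x08 r2    sp+0x0c r3
//     sp+0x10 r12   sp+0x14 lr    sp+0x18 pc    sp+0x1c xPSR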

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Handle a hard fault that occurred in the kernel. This function is invoked
/// by the naked hard_fault_handler function.
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    let hardfault_stacked_registers = HardFaultStackedRegisters {
        r0: *faulting_stack.offset(0),
        r1: *faulting_stack.offset(1),
        r2: *faulting_stack.offset(2),
        r3: *faulting_stack.offset(3),
        r12: *faulting_stack.offset(4),
        lr: *faulting_stack.offset(5),
        pc: *faulting_stack.offset(6),
        xpsr: *faulting_stack.offset(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12  0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \txpsr  0x{:x}\r\n\
         ",
        hardfault_stacked_registers.r0,
        hardfault_stacked_registers.r1,
        hardfault_stacked_registers.r2,
        hardfault_stacked_registers.r3,
        hardfault_stacked_registers.r12,
        hardfault_stacked_registers.lr,
        hardfault_stacked_registers.pc,
        hardfault_stacked_registers.xpsr
    );
}

// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
/// All ISRs are caught by this handler, which disables the fired interrupt in
/// the NVIC, marks it pending again, and switches to the kernel.
unsafe extern "C" fn generic_isr() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Skip saving process state if not coming from user-space
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    // We need to make sure the kernel continues the execution after this ISR
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb

    // We need the kernel's most recent value of r1, which points to the
    // Process struct's stored registers field. The kernel's r1 lives in
    // the second word of the hardware-stacked registers on the MSP.
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0
100: // _generic_isr_no_stacking
    // Find the ISR number by looking at the low byte of the IPSR register
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    // ISRs start at 16, so subtract 16 to get zero-indexed
    subs r0, r0, #16

    // High level:
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    //
    // r3 = &NVIC.ICER[r0 / 32]
    ldr r2, 101f      // r2 = &NVIC.ICER
    lsrs r3, r0, #5   // r3 = r0 / 32
    lsls r3, r3, #2   // ICER is word-sized, so multiply offset by 4
    adds r3, r3, r2   // r3 = r2 + r3

    // r2 = 1 << (r0 & 31)
    movs r2, #31      // r2 = 31
    ands r0, r2       // r0 = r0 & r2
    subs r2, r2, #30  // r2 = r2 - 30 i.e. r2 = 1
    lsls r2, r2, r0   // r2 = 1 << r0

    // *r3 = r2
    str r2, [r3]

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost
    // in service_pending_interrupts()
    //
    // The NVIC.ISPR base is 0xE000E200, which is 0x80 (aka #128) above the
    // NVIC.ICER base.  Calculate the ISPR address by offsetting from the ICER
    // address so as to avoid re-doing the [r0 / 32] index math.
    adds r3, #128
    str r2, [r3]

    bx lr // Explicit return: the literal pool words below are data, not code

.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}
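
// For reference, the ICER/ISPR arithmetic in the assembly above is the
// Thumb-friendly expansion of the following Rust sketch (a hypothetical
// helper for illustration only; it is not part of this crate):
//
//     fn disable_and_set_pending(irq: u32) {
//         const NVIC_ICER: u32 = 0xE000_E180; // Interrupt Clear-Enable base
//         const NVIC_ISPR: u32 = 0xE000_E200; // Interrupt Set-Pending base
//         let byte_offset = (irq / 32) * 4;   // registers are word-sized
//         let bit = 1u32 << (irq & 31);
//         unsafe {
//             core::ptr::write_volatile((NVIC_ICER + byte_offset) as *mut u32, bit);
//             core::ptr::write_volatile((NVIC_ISPR + byte_offset) as *mut u32, bit);
//         }
//     }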

// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn systick_handler() {
    unimplemented!()
}

/// The `systick_handler` is called when the systick interrupt occurs, signaling
/// that an application executed for longer than its timeslice. This interrupt
/// handler is no longer responsible for signaling to the kernel thread that an
/// interrupt has occurred, but is slightly more efficient than the
/// `generic_isr` handler on account of not needing to mark the interrupt as
/// pending.
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn systick_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch to user function where application state
    // is saved and the scheduler can choose what to do next.
    bx   r0
    .align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
        "
    );
}
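
// Note on the return path: the 0xFFFFFFF9 EXC_RETURN pops the kernel (MSP)
// frame that was pushed when `switch_to_user` executed `svc 0xff`, so the
// kernel resumes immediately after that instruction, with the app's state
// hardware-stacked on the PSP.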

// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
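/// The `svc_handler` is entered on two paths: when an app issues an `svc` to
/// make a syscall, and when the kernel (in `switch_to_user`) issues an `svc`
/// to start running an app. The `lr` comparison below distinguishes the two
/// and switches privilege accordingly.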
unsafe extern "C" fn svc_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f
    movs r0, #1
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f
    bx r1

    .align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}
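
// SYSCALL_FIRED is a flag defined alongside the shared syscall code in the
// `cortexm` crate (re-exported at the top of this file); the kernel checks it
// after `switch_to_user` returns to tell a syscall trap apart from an
// ordinary interrupt.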

// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn hard_fault_handler() {
    use core::arch::naked_asm;
    // The assembly below sets r1 to 1 if the hard fault occurred while
    // executing the kernel (frame on MSP), and leaves it 0 if the hard
    // fault occurred in userspace (frame on PSP).
    naked_asm!(
        "
    // Will be incremented to 1 when we determine that it was a fault
    // in the kernel
    movs r1, #0
    // r2 holds the bit mask for testing lr, and r3 preserves lr
    mov r3, lr

    movs r2, #4
    tst r3, r2
    beq 100f

    // _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard-fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0                           // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f                             // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2                                // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, record that the hard fault occurred in an app, save some
    // CPU state, and finally return to the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 // Fault
    str r1, [r0, #0]

    // NOTE:
    // -----
    //
    // Even though ARMv6-M SCB and Control registers
    // are different from ARMv7-M, they are still compatible
    // with each other. So, we can keep the same code as
    // ARMv7-M.
    //
    // ARMv6-M, however, makes _unprivileged_ execution optional.

    // Read the SCB registers.
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
    ldr r2, [r1, #0] // CCR
    str r2, [r0, #0]
    ldr r2, [r1, #20] // CFSR
    str r2, [r0, #4]
    ldr r2, [r1, #24] // HFSR
    str r2, [r0, #8]
    ldr r2, [r1, #32] // MMFAR
    str r2, [r0, #12]
    ldr r2, [r1, #36] // BFAR
    str r2, [r0, #16]

    // Set thread mode to privileged
    movs r0, #0
    msr CONTROL, r0
    // No ISB required on M0
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html

    // Load the FEXC_RETURN_MSP LR address and return to it, to switch to the
    // kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

    .align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
        ",
        kernel_hard_fault_handler = sym hard_fault_handler_kernel,
    );
}
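
// For reference, the SCB block read above starts at 0xE000ED14 (CCR), so the
// load offsets map to: +20 = 0xE000ED28 (CFSR), +24 = 0xE000ED2C (HFSR),
// +32 = 0xE000ED34 (MMFAR), and +36 = 0xE000ED38 (BFAR). ARMv6-M does not
// architect these fault-status registers, so the stored values are mainly
// meaningful on larger cores.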

// Enum with no variants to ensure that this type is not instantiable. It is
// only used to pass architecture-specific constants and functions via the
// `CortexMVariant` trait.
pub enum CortexM0 {}

impl cortexm::CortexMVariant for CortexM0 {
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    // Mock implementation for tests on Travis-CI.
    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!(
            "
    // Rust's `asm!()` macro (as of May 2021) will not let us mark r6, r7, and
    // r9 as clobbers: r6 and r9 are used internally by LLVM, and r7 is used
    // for the frame pointer. However, in the process of restoring and saving
    // the process's registers, we do in fact clobber r6, r7, and r9. So, we
    // work around this by manually saving r6 into r2, r7 into r3, and r9 into
    // r12, and marking those registers as clobbered instead.
    mov r2, r6
    mov r3, r7
    mov r12, r9

    // Load non-hardware-stacked registers from Process stack
    ldmia r1!, {{r4-r7}}
    mov r11, r7
    mov r10, r6
    mov r9,  r5
    mov r8,  r4
    ldmia r1!, {{r4-r7}}
    subs r1, 32 // Restore pointer to process_regs: the two ldmia
                // writebacks above advanced r1 by 32 bytes in total

    // Load bottom of stack into Process Stack Pointer
    msr psp, r0

    // SWITCH
    svc 0xff // It doesn't matter which SVC number we use here

    // Store non-hardware-stacked registers in process_regs
    // r1 still points to process_regs because we are clobbering all
    // non-hardware-stacked registers
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11

    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]

    mrs r0, PSP // PSP into user_stack

    // Manually restore r6, r7 and r9.
    mov r6, r2
    mov r7, r3
    mov r9, r12
            ",
            inout("r0") user_stack,
            in("r1") process_regs,
            out("r2") _,
            out("r3") _,
            out("r4") _,
            out("r5") _,
            out("r8") _,
            out("r10") _,
            out("r11") _,
            out("r12") _,
        );

        user_stack
    }
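
    // A minimal usage sketch (hypothetical caller; the real call site lives
    // in the kernel's Cortex-M syscall handling, not in this crate):
    //
    //     let new_sp = unsafe {
    //         <CortexM0 as cortexm::CortexMVariant>::switch_to_user(
    //             process_sp,        // the process's current stack pointer
    //             &mut stored_regs,  // the process's saved r4-r11, [usize; 8]
    //         )
    //     };
    //
    // The returned pointer is the process's stack pointer at the moment it
    // trapped back into the kernel (via `svc`, SysTick, or another interrupt).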

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        cortexm::print_cortexm_state(writer)
    }
}