cortexm0/
lib.rs

1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Shared implementations for ARM Cortex-M0 MCUs.
6
7#![no_std]
8
9use core::fmt::Write;
10
11// Re-export the base generic cortex-m functions here as they are
12// valid on cortex-m0.
13pub use cortexm::support;
14
15pub use cortexm::nvic;
16pub use cortexm::syscall;
17pub use cortexm::thread_id;
18
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Registers automatically pushed by the hardware onto the active stack on
/// exception entry. Field order matches the stacking order: these are read
/// back out at word offsets 0..=7 by `hard_fault_handler_kernel`.
struct HardFaultStackedRegisters {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}
30
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Handle a hard fault that occurred in the kernel. This function is invoked
/// by the naked hard_fault_handler function.
///
/// `faulting_stack` points at the exception frame the hardware pushed on
/// fault entry; the eight stacked words are read back out and reported via
/// a kernel panic. This function never returns.
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    // Read the i-th word of the hardware-stacked exception frame.
    // SAFETY: the naked hard fault handler passes the stack pointer that was
    // active at fault time, so indices 0..=7 address the eight words the
    // hardware pushed on exception entry.
    let stacked_word = |i: usize| -> u32 { unsafe { *faulting_stack.add(i) } };

    // Hardware stacking order on exception entry: r0-r3, r12, lr, pc, xPSR.
    let regs = HardFaultStackedRegisters {
        r0: stacked_word(0),
        r1: stacked_word(1),
        r2: stacked_word(2),
        r3: stacked_word(3),
        r12: stacked_word(4),
        lr: stacked_word(5),
        pc: stacked_word(6),
        xpsr: stacked_word(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0  0x{:x}\r\n\
         \tr1  0x{:x}\r\n\
         \tr2  0x{:x}\r\n\
         \tr3  0x{:x}\r\n\
         \tr12  0x{:x}\r\n\
         \tlr  0x{:x}\r\n\
         \tpc  0x{:x}\r\n\
         \txpsr  0x{:x}\r\n\
         ",
        kernel::TOCK_KERNEL_VERSION,
        regs.r0,
        regs.r1,
        regs.r2,
        regs.r3,
        regs.r12,
        regs.lr,
        regs.pc,
        regs.xpsr
    );
}
69
// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build stand-in for `generic_isr`: present only so non-ARM builds
/// (e.g. CI unit tests) link; must never actually be called.
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}
75
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
/// All ISRs are caught by this handler which disables the NVIC and switches to the kernel.
///
/// Concretely (see the assembly below):
/// - If the exception came from user space (LR == 0xFFFFFFFD, the
///   return-to-thread-using-PSP EXC_RETURN value), r4-r11 are saved into the
///   process's stored-registers area (whose pointer is recovered from the
///   kernel's hardware-stacked r1 on the MSP), and LR is rewritten to
///   0xFFFFFFF9 so the exception return resumes the kernel on the MSP.
/// - In all cases, the interrupt is disabled by writing its bit in NVIC.ICER
///   and then re-pended via NVIC.ISPR so it is not lost before
///   `service_pending_interrupts()` runs.
unsafe extern "C" fn generic_isr() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Skip saving process state if not coming from user-space
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    // We need to make sure the kernel continues the execution after this ISR
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb

    // We need the most recent kernel's version of r1, which points
    // to the Process struct's stored registers field. The kernel's r1
    // lives in the second word of the hardware stacked registers on MSP
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0
100: // _ggeneric_isr_no_stacking
    // Find the ISR number by looking at the low byte of the IPSR registers
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    // ISRs start at 16, so subtract 16 to get zero-indexed
    subs r0, r0, #16

    // High level:
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    //
    // r3 = &NVIC.ICER[r0 / 32]
    ldr r2, 101f      // r2 = &NVIC.ICER
    lsrs r3, r0, #5   // r3 = r0 / 32
    lsls r3, r3, #2   // ICER is word-sized, so multiply offset by 4
    adds r3, r3, r2   // r3 = r2 + r3

    // r2 = 1 << (r0 & 31)
    movs r2, #31      // r2 = 31
    ands r0, r2       // r0 = r0 & r2
    subs r2, r2, #30  // r2 = r2 - 30 i.e. r2 = 1
    lsls r2, r2, r0   // r2 = 1 << r0

    // *r3 = r2
    str r2, [r3]

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost
    // in service_pending_interrupts()
    //
    // The NVIC.ISPR base is 0xE000E200, which is 0x20 (aka #32) above the
    // NVIC.ICER base.  Calculate the ISPR address by offsetting from the ICER
    // address so as to avoid re-doing the [r0 / 32] index math.
    adds r3, #32
    str r2, [r3]

    bx lr // return here since we have extra words in the assembly

.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}
166
// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build stand-in for `systick_handler`: present only so non-ARM builds
/// link; must never actually be called.
unsafe extern "C" fn systick_handler() {
    unimplemented!()
}
172
/// The `systick_handler` is called when the systick interrupt occurs, signaling
/// that an application executed for longer than its timeslice. This interrupt
/// handler is no longer responsible for signaling to the kernel thread that an
/// interrupt has occurred, but is slightly more efficient than the
/// `generic_isr` handler on account of not needing to mark the interrupt as
/// pending.
///
/// It switches thread mode back to privileged and performs the exception
/// return with 0xFFFFFFF9, resuming the kernel on the MSP (inside
/// `switch_to_user`, just after the `svc` instruction).
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn systick_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch to user function where application state
    // is saved and the scheduler can choose what to do next.
    bx   r0
    .align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
        "
    );
}
203
// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build stand-in for `svc_handler`: present only so non-ARM builds
/// link; must never actually be called.
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}
209
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
/// Handler for the `svc` instruction, used in both directions of the
/// kernel/process switch:
/// - Entered from the kernel (LR == 0xFFFFFFF9, return-to-thread-using-MSP):
///   this is the kernel switching to a process. Drop thread mode to
///   unprivileged (CONTROL = 1) and exception-return with 0xFFFFFFFD so
///   execution resumes on the process stack (PSP).
/// - Entered from a process (any other LR): this is a system call. Raise
///   thread mode to privileged (CONTROL = 0), set the `SYSCALL_FIRED` flag,
///   and exception-return with 0xFFFFFFF9 back to the kernel on the MSP.
unsafe extern "C" fn svc_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f
    movs r0, #1
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    movs r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f
    bx r1

    .align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
        "
    );
}
247
// Mock implementation for tests on Travis-CI.
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build stand-in for `hard_fault_handler`: present only so non-ARM
/// builds link; must never actually be called.
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}
253
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
/// Hard fault entry point. Bit 2 of EXC_RETURN (in LR) tells us which stack
/// was active when the fault hit:
/// - MSP active (kernel fault): tail-branch to `hard_fault_handler_kernel`
///   with the faulting stack pointer in r0; that handler panics and never
///   returns.
/// - PSP active (app fault): set the `APP_HARD_FAULT` flag, snapshot several
///   SCB registers into `SCB_REGISTERS`, switch thread mode back to
///   privileged, and exception-return with 0xFFFFFFF9 onto the kernel (MSP)
///   stack.
unsafe extern "C" fn hard_fault_handler() {
    use core::arch::naked_asm;
    // In the assembly below, r1 is a kernel-fault flag: it is incremented to
    // 1 when the fault is determined to have occurred in the kernel (MSP
    // active), and stays 0 for a fault in user code (PSP active).
    //
    // NOTE(review): unlike `systick_handler`/`svc_handler`, the app path here
    // writes CONTROL without a following `isb` (the asm comment claims none
    // is required on M0) — confirm this asymmetry is intentional.
    naked_asm!(
        "
    // Will be incremented to 1 when we determine that it was a fault
    // in the kernel
    movs r1, #0
    // r2 is used for testing and r3 is used to store lr
    mov r3, lr

    movs r2, #4
    tst r3, r2
    beq 100f

    // _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard-fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0                           // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f                             // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2                                // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, store that a hardfault occurred in an app, store some CPU
    // state and finally return to the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 // Fault
    str r1, [r0, #0]

    // NOTE:
    // -----
    //
    // Even though ARMv6-M SCB and Control registers
    // are different from ARMv7-M, they are still compatible
    // with each other. So, we can keep the same code as
    // ARMv7-M.
    //
    // ARMv6-M however has no _privileged_ mode.

    // Read the SCB registers.
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
    ldr r2, [r1, #0] // CCR
    str r2, [r0, #0]
    ldr r2, [r1, #20] // CFSR
    str r2, [r0, #4]
    ldr r2, [r1, #24] // HFSR
    str r2, [r0, #8]
    ldr r2, [r1, #32] // MMFAR
    str r2, [r0, #12]
    ldr r2, [r1, #36] // BFAR
    str r2, [r0, #16]

    // Set thread mode to privileged
    movs r0, #0
    msr CONTROL, r0
    // No ISB required on M0
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html

    // Load the FEXC_RETURN_MSP LR address and return to it, to switch to the
    // kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

    .align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
        ",
        kernel_hard_fault_handler = sym hard_fault_handler_kernel,
    );
}
341
/// Enum with no variants to ensure that this type is not instantiable. It is
/// only used to pass architecture-specific constants and functions via the
/// `CortexMVariant` trait.
pub enum CortexM0 {}
346
impl cortexm::CortexMVariant for CortexM0 {
    // Exception/interrupt handler entry points defined above, exported to
    // the generic cortex-m code through this variant trait.
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    // Mock implementation for tests on Travis-CI.
    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

    /// Switch to a user process: restore its r4-r11 from `process_regs`,
    /// point the PSP at `user_stack`, and trap into `svc_handler` to enter
    /// the process. When control comes back to the kernel (via the next
    /// exception return to MSP), save r4-r11 back into `process_regs` and
    /// return the process's current stack pointer read from PSP.
    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!(
            "
    // Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and r9
    // as clobbers. r6 and r9 is used internally by LLVM, and r7 is used for
    // the frame pointer. However, in the process of restoring and saving the
    // process's registers, we do in fact clobber r6, r7 and r9. So, we work
    // around this by doing our own manual saving of r6 using r2, r7 using r3,
    // r9 using r12, and then mark those as clobbered.
    mov r2, r6
    mov r3, r7
    mov r12, r9

    // Load non-hardware-stacked registers from Process stack
    ldmia r1!, {{r4-r7}}
    mov r11, r7
    mov r10, r6
    mov r9,  r5
    mov r8,  r4
    ldmia r1!, {{r4-r7}}
    subs r1, 32 // Restore pointer to process_regs
                // ldmia! added a 32-byte offset

    // Load bottom of stack into Process Stack Pointer
    msr psp, r0

    // SWITCH
    svc 0xff // It doesn't matter which SVC number we use here

    // Store non-hardware-stacked registers in process_regs
    // r1 still points to process_regs because we are clobbering all
    // non-hardware-stacked registers
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    mov  r4, r8
    mov  r5, r9
    mov  r6, r10
    mov  r7, r11

    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]

    mrs r0, PSP // PSP into user_stack

    // Manually restore r6, r7 and r9.
    mov r6, r2
    mov r7, r3
    mov r9, r12
            ",
            inout("r0") user_stack,
            in("r1") process_regs,
            out("r2") _,
            out("r3") _,
            out("r4") _,
            out("r5") _,
            out("r8") _,
            out("r10") _,
            out("r11") _,
            out("r12") _,
        );

        user_stack
    }

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        // Delegate to the shared implementation in the generic cortex-m crate.
        cortexm::print_cortexm_state(writer)
    }
}