#![crate_name = "cortexm0"]
#![crate_type = "rlib"]
#![no_std]
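
//! Shared support for the ARM Cortex-M0 architecture.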

use core::fmt::Write;

pub use cortexm::support;

pub use cortexm::nvic;
pub use cortexm::syscall;

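/// Registers pushed onto the stack by the ARMv6-M hardware when an exception
/// is taken, in stacking order (lowest address first).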
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
struct HardFaultStackedRegisters {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}

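/// Continuation of the hard fault handler for faults that occurred while the
/// kernel was executing: decodes the hardware-stacked registers and panics
/// with a register dump. This handler never returns.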
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    let hardfault_stacked_registers = HardFaultStackedRegisters {
        r0: *faulting_stack.offset(0),
        r1: *faulting_stack.offset(1),
        r2: *faulting_stack.offset(2),
        r3: *faulting_stack.offset(3),
        r12: *faulting_stack.offset(4),
        lr: *faulting_stack.offset(5),
        pc: *faulting_stack.offset(6),
        xpsr: *faulting_stack.offset(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0 0x{:x}\r\n\
         \tr1 0x{:x}\r\n\
         \tr2 0x{:x}\r\n\
         \tr3 0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr 0x{:x}\r\n\
         \tpc 0x{:x}\r\n\
         \txpsr 0x{:x}\r\n\
         ",
        option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
        hardfault_stacked_registers.r0,
        hardfault_stacked_registers.r1,
        hardfault_stacked_registers.r2,
        hardfault_stacked_registers.r3,
        hardfault_stacked_registers.r12,
        hardfault_stacked_registers.lr,
        hardfault_stacked_registers.pc,
        hardfault_stacked_registers.xpsr
    );
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}

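/// Entry point for all device-specific interrupts. It disables the interrupt
/// in the NVIC, marks it pending again so the kernel can service it later,
/// and, when the interrupt arrived from user space, saves the process state
/// and returns to the kernel.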
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn generic_isr();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .generic_isr, \"ax\"
    .global generic_isr
    .thumb_func
generic_isr:
    /* Skip saving process state if not coming from user-space */
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    /* We need to make sure the kernel continues execution after this ISR. */
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb

    /* We need the kernel's most recent version of r1, which points */
    /* to the Process struct's stored registers field. The kernel's r1 */
    /* lives in the second word of the hardware-stacked registers on the MSP. */
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov r4, r8
    mov r5, r9
    mov r6, r10
    mov r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0
100: // _ggeneric_isr_no_stacking
    /* Find the ISR number by looking at the low byte of the IPSR register */
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    /* ISRs start at 16, so subtract 16 to get zero-indexed */
    subs r0, r0, #16

    /*
     * High level:
     * NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
     */
    /* r3 = &NVIC.ICER[r0 / 32] */
    ldr r2, 101f     /* r2 = &NVIC.ICER */
    lsrs r3, r0, #5  /* r3 = r0 / 32 */
    lsls r3, r3, #2  /* ICER is word-sized, so multiply offset by 4 */
    adds r3, r3, r2  /* r3 = r2 + r3 */

    /* r2 = 1 << (r0 & 31) */
    movs r2, #31     /* r2 = 31 */
    ands r0, r2      /* r0 = r0 & r2 */
    subs r2, r2, #30 /* r2 = r2 - 30, i.e. r2 = 1 */
    lsls r2, r2, r0  /* r2 = 1 << r0 */

    /* *r3 = r2 */
    str r2, [r3]

    /* The pending bit in ISPR might be reset by hardware for pulse interrupts
     * at this point. So set it here again so the interrupt does not get lost
     * in service_pending_interrupts().
     *
     * The NVIC.ISPR base is 0xE000E200, which is 0x20 (aka #32) above the
     * NVIC.ICER base. Calculate the ISPR address by offsetting from the ICER
     * address so as to avoid re-doing the [r0 / 32] index math.
     */
    adds r3, #32
    str r2, [r3]

    bx lr /* return here since we have extra words in the assembly */

.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD"
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn systick_handler_m0() {
    unimplemented!()
}

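/// Handler for the SysTick interrupt: switches back to privileged thread mode
/// and returns on the kernel (MSP) stack so the scheduler regains control
/// from the running process.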
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn systick_handler_m0();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .systick_handler_m0, \"ax\"
    .global systick_handler_m0
    .thumb_func
systick_handler_m0:

    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch-to-user function, where application state
    // is saved and the scheduler can choose what to do next.
    bx r0
.align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
    "
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}

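/// Handler for `svc` instructions. It dispatches on the EXC_RETURN value in
/// lr: an `svc` from the kernel completes the switch to user space (PSP,
/// unprivileged), while an `svc` from an app records the system call in
/// `SYSCALL_FIRED` and returns to the kernel (MSP) stack.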
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn svc_handler();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .svc_handler, \"ax\"
    .global svc_handler
    .thumb_func
svc_handler:
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f
    movs r0, #1
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f
    bx r1

.align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
    "
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}

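/// Handler for hard faults. It inspects EXC_RETURN to decide whether the
/// fault came from the kernel (MSP) or from a process (PSP): kernel faults
/// branch into `hard_fault_handler_kernel`, which panics; app faults set
/// `APP_HARD_FAULT`, capture some SCB state, and return to the kernel stack.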
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn hard_fault_handler();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .hard_fault_handler, \"ax\"
    .global hard_fault_handler
    .thumb_func
hard_fault_handler:
    /*
     * r1 will be incremented to 1 when we determine that the fault occurred
     * in the kernel.
     */
    movs r1, #0
    /*
     * r2 is used to test bit 2 of EXC_RETURN (which stack was in use) and
     * r3 is used to store lr.
     */
    mov r3, lr

    movs r2, #4
    tst r3, r2
    beq 100f

// _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0                           // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f                             // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2                                // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, store that a hard fault occurred in an app, store some CPU
    // state, and finally return to the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 /* Fault */
    str r1, [r0, #0]

    /*
     * NOTE:
     * -----
     *
     * Even though ARMv6-M SCB and Control registers
     * are different from ARMv7-M, they are still compatible
     * with each other. So, we can keep the same code as
     * ARMv7-M.
     *
     * ARMv6-M however has no _privileged_ mode.
     */

    /* Read the SCB registers. */
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
    ldr r2, [r1, #0]  /* CCR */
    str r2, [r0, #0]
    ldr r2, [r1, #20] /* CFSR */
    str r2, [r0, #4]
    ldr r2, [r1, #24] /* HFSR */
    str r2, [r0, #8]
    ldr r2, [r1, #32] /* MMFAR */
    str r2, [r0, #12]
    ldr r2, [r1, #36] /* BFAR */
    str r2, [r0, #16]

    /* Set thread mode to privileged */
    movs r0, #0
    msr CONTROL, r0
    /* No ISB required on M0 */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */

    // Load the FEXC_RETURN_MSP LR address and return to it, to switch to the
    // kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

.align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
    ",
    kernel_hard_fault_handler = sym hard_fault_handler_kernel,
);

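/// Variant-less enum, so it can never be instantiated: it exists only to
/// carry the Cortex-M0 constants and functions of the `CortexMVariant` trait.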
pub enum CortexM0 {}

impl cortexm::CortexMVariant for CortexM0 {
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler_m0;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

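    // Register convention of the assembly below: r0 carries the user stack
    // pointer in and the updated PSP out (`inout("r0")`), while r1 points to
    // the eight words of `process_regs` holding r4-r11.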
    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!("
        // Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and r9
        // as clobbers. r6 and r9 are used internally by LLVM, and r7 is used for
        // the frame pointer. However, in the process of restoring and saving the
        // process's registers, we do in fact clobber r6, r7 and r9. So, we work
        // around this by doing our own manual saving of r6 using r2, r7 using r3,
        // r9 using r12, and then mark those as clobbered.
        mov r2, r6
        mov r3, r7
        mov r12, r9

        /* Load non-hardware-stacked registers from Process stack */
        ldmia r1!, {{r4-r7}}
        mov r11, r7
        mov r10, r6
        mov r9, r5
        mov r8, r4
        ldmia r1!, {{r4-r7}}
        subs r1, 32 /* Restore pointer to process_regs; */
                    /* ldmia! added a 32-byte offset */

        /* Load bottom of stack into Process Stack Pointer */
        msr psp, r0

        /* SWITCH */
        svc 0xff /* It doesn't matter which SVC number we use here */

        /* Store non-hardware-stacked registers in process_regs */
        /* r1 still points to process_regs because we are clobbering all */
        /* non-hardware-stacked registers */
        str r4, [r1, #16]
        str r5, [r1, #20]
        str r6, [r1, #24]
        str r7, [r1, #28]

        mov r4, r8
        mov r5, r9
        mov r6, r10
        mov r7, r11

        str r4, [r1, #0]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]

        mrs r0, PSP /* PSP into user_stack */

        // Manually restore r6, r7 and r9.
        mov r6, r2
        mov r7, r3
        mov r9, r12

        ",
        inout("r0") user_stack,
        in("r1") process_regs,
        out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _,
        out("r10") _, out("r11") _, out("r12") _);

        user_stack
    }

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        cortexm::print_cortexm_state(writer)
    }
}