#![no_std]

use core::fmt::Write;

pub use cortexm::support;

pub use cortexm::nvic;
pub use cortexm::syscall;

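/// Registers automatically pushed onto the stack by the hardware when an
/// exception is taken, in the order they appear in the exception frame.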
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
struct HardFaultStackedRegisters {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}

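/// Continuation of the hard fault handler for faults that occurred while the
/// kernel itself was executing. Reads the hardware-stacked exception frame at
/// `faulting_stack` and panics with a dump of those registers.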
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    let hardfault_stacked_registers = HardFaultStackedRegisters {
        r0: *faulting_stack.offset(0),
        r1: *faulting_stack.offset(1),
        r2: *faulting_stack.offset(2),
        r3: *faulting_stack.offset(3),
        r12: *faulting_stack.offset(4),
        lr: *faulting_stack.offset(5),
        pc: *faulting_stack.offset(6),
        xpsr: *faulting_stack.offset(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0 0x{:x}\r\n\
         \tr1 0x{:x}\r\n\
         \tr2 0x{:x}\r\n\
         \tr3 0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr 0x{:x}\r\n\
         \tpc 0x{:x}\r\n\
         \txpsr 0x{:x}\r\n\
         ",
        option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
        hardfault_stacked_registers.r0,
        hardfault_stacked_registers.r1,
        hardfault_stacked_registers.r2,
        hardfault_stacked_registers.r3,
        hardfault_stacked_registers.r12,
        hardfault_stacked_registers.lr,
        hardfault_stacked_registers.pc,
        hardfault_stacked_registers.xpsr
    );
}

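// Stub so this crate still compiles when building for a non-ARM host (for
// example, host-side tests or tooling); the real handler below is written in
// assembly and only assembled for bare-metal ARM targets.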
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn generic_isr();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .generic_isr, \"ax\"
    .global generic_isr
    .thumb_func
  generic_isr:
    /* Skip saving process state if not coming from user-space */
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    /* We need to make sure the kernel continues execution after this ISR */
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb

    /* We need the most recent kernel's version of r1, which points */
    /* to the Process struct's stored registers field. The kernel's r1 */
    /* lives in the second word of the hardware-stacked registers on the MSP. */
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov r4, r8
    mov r5, r9
    mov r6, r10
    mov r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0

100: // _generic_isr_no_stacking
    /* Find the ISR number by looking at the low byte of the IPSR register */
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    /* ISRs start at 16, so subtract 16 to get the zero-indexed interrupt number */
    subs r0, r0, #16

    /*
     * High level:
     *   NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
     */
    /* r3 = &NVIC.ICER[r0 / 32] */
    ldr r2, 101f     /* r2 = &NVIC.ICER */
    lsrs r3, r0, #5  /* r3 = r0 / 32 */
    lsls r3, r3, #2  /* ICER is word-sized, so multiply the offset by 4 */
    adds r3, r3, r2  /* r3 = r2 + r3 */

    /* r2 = 1 << (r0 & 31) */
    movs r2, #31     /* r2 = 31 */
    ands r0, r2      /* r0 = r0 & r2 */
    subs r2, r2, #30 /* r2 = r2 - 30, i.e. r2 = 1 */
    lsls r2, r2, r0  /* r2 = 1 << r0 */

    /* *r3 = r2 */
    str r2, [r3]

    /* The pending bit in ISPR might be reset by hardware for pulse interrupts
     * at this point. So set it here again so the interrupt does not get lost
     * in service_pending_interrupts().
     *
     * The NVIC.ISPR base is 0xE000E200, which is 0x20 (aka #32) above the
     * NVIC.ICER base. Calculate the ISPR address by offsetting from the ICER
     * address so as to avoid re-doing the [r0 / 32] index math.
     */
    adds r3, #32
    str r2, [r3]

    bx lr /* return here: the words below are literal-pool data, not code */

.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD"
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn systick_handler_m0() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn systick_handler_m0();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .systick_handler_m0, \"ax\"
    .global systick_handler_m0
    .thumb_func
  systick_handler_m0:

    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch-to-user function where application state
    // is saved and the scheduler can choose what to do next.
    bx r0

.align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
    "
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn svc_handler();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .svc_handler, \"ax\"
    .global svc_handler
    .thumb_func
  svc_handler:
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f

    // The SVC was issued by the kernel: drop Thread mode to unprivileged and
    // return onto the process (PSP) stack.
    movs r0, #1
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    // The SVC is a system call from a process: record it, restore privileged
    // Thread mode, and return onto the kernel (MSP) stack.
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f // EXC_RETURN_MSP
    bx r1

.align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
    "
);

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
extern "C" {
    pub fn hard_fault_handler();
}

#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
core::arch::global_asm!(
    "
    .section .hard_fault_handler, \"ax\"
    .global hard_fault_handler
    .thumb_func
  hard_fault_handler:
    /*
     * r1 is incremented to 1 below if we determine that the fault occurred
     * in the kernel.
     */
    movs r1, #0
    /*
     * r2 is used as a scratch register for the EXC_RETURN test; r3 stores lr.
     */
    mov r3, lr

    movs r2, #4
    tst r3, r2
    beq 100f

// _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0                           // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f                             // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2                                // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, record that a hard fault occurred in an app, store some CPU
    // state, and finally return onto the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 /* Fault */
    str r1, [r0, #0]

    /*
     * NOTE:
     * -----
     *
     * Even though the ARMv6-M SCB and CONTROL registers are different from
     * ARMv7-M, they are still compatible with each other. So, we can keep
     * the same code as ARMv7-M.
     *
     * On ARMv6-M, however, the _unprivileged_ (user) level is optional and
     * not all Cortex-M0 implementations provide it.
     */

    /* Read the SCB registers. */
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
    ldr r2, [r1, #0]  /* CCR */
    str r2, [r0, #0]
    ldr r2, [r1, #20] /* CFSR */
    str r2, [r0, #4]
    ldr r2, [r1, #24] /* HFSR */
    str r2, [r0, #8]
    ldr r2, [r1, #32] /* MMFAR */
    str r2, [r0, #12]
    ldr r2, [r1, #36] /* BFAR */
    str r2, [r0, #16]

    /* Set thread mode to privileged */
    movs r0, #0
    msr CONTROL, r0
    /* No ISB required on M0 */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */

    // Load the FEXC_RETURN_MSP value into LR and return to it, to switch to
    // the kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

    .align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
    ",
    kernel_hard_fault_handler = sym hard_fault_handler_kernel,
);

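/// Marker type selecting the ARMv6-M (Cortex-M0/M0+) implementations of the
/// exception handlers and the context switch for the generic `cortexm` crate.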
pub enum CortexM0 {}

impl cortexm::CortexMVariant for CortexM0 {
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler_m0;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

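    // Context switch into a process: restore r4-r11 and the PSP from
    // `process_regs`/`user_stack`, issue an SVC to enter the process, and when
    // the next relevant exception returns onto the kernel stack, save r4-r11
    // and the new PSP back out. Per the str offsets in the assembly below,
    // `process_regs` holds, in order: r8, r9, r10, r11, r4, r5, r6, r7.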
    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!("
    // The Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and
    // r9 as clobbers. r6 and r9 are used internally by LLVM, and r7 is used
    // for the frame pointer. However, in the process of restoring and saving
    // the process's registers, we do in fact clobber r6, r7 and r9. So, we
    // work around this by manually saving r6 into r2, r7 into r3, and r9 into
    // r12, and then marking r2, r3 and r12 as clobbered.
    mov r2, r6
    mov r3, r7
    mov r12, r9

    /* Load non-hardware-stacked registers from the process stack */
    ldmia r1!, {{r4-r7}}
    mov r11, r7
    mov r10, r6
    mov r9, r5
    mov r8, r4
    ldmia r1!, {{r4-r7}}
    subs r1, 32 /* Restore pointer to process_regs; */
                /* the two ldmia's above advanced it by 32 bytes */

    /* Load bottom of stack into the Process Stack Pointer */
    msr psp, r0

    /* SWITCH */
    svc 0xff /* It doesn't matter which SVC number we use here */

    /* Store non-hardware-stacked registers in process_regs */
    /* r1 still points to process_regs because we are clobbering all */
    /* non-hardware-stacked registers */
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    mov r4, r8
    mov r5, r9
    mov r6, r10
    mov r7, r11

    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]

    mrs r0, PSP /* PSP into user_stack */

    // Manually restore r6, r7 and r9.
    mov r6, r2
    mov r7, r3
    mov r9, r12
    ",
        inout("r0") user_stack,
        in("r1") process_regs,
        out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _,
        out("r10") _, out("r11") _, out("r12") _);

        user_stack
    }

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        cortexm::print_cortexm_state(writer)
    }
}