#![no_std]

use core::fmt::Write;

pub use cortexm::support;

pub use cortexm::nvic;
pub use cortexm::syscall;

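/// The registers the hardware automatically stacks when taking an exception
/// on ARMv6-M: r0-r3, r12, lr, pc and xPSR, in ascending address order
/// starting at the faulting stack pointer.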
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
struct HardFaultStackedRegisters {
    r0: u32,
    r1: u32,
    r2: u32,
    r3: u32,
    r12: u32,
    lr: u32,
    pc: u32,
    xpsr: u32,
}

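/// Continues hard fault handling in Rust for faults that occurred in the
/// kernel: reads the hardware-stacked registers off the faulting (main)
/// stack and panics with a register dump. Never returns.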
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
unsafe extern "C" fn hard_fault_handler_kernel(faulting_stack: *mut u32) -> ! {
    let hardfault_stacked_registers = HardFaultStackedRegisters {
        r0: *faulting_stack.offset(0),
        r1: *faulting_stack.offset(1),
        r2: *faulting_stack.offset(2),
        r3: *faulting_stack.offset(3),
        r12: *faulting_stack.offset(4),
        lr: *faulting_stack.offset(5),
        pc: *faulting_stack.offset(6),
        xpsr: *faulting_stack.offset(7),
    };

    panic!(
        "Kernel HardFault.\r\n\
         \tKernel version {}\r\n\
         \tr0 0x{:x}\r\n\
         \tr1 0x{:x}\r\n\
         \tr2 0x{:x}\r\n\
         \tr3 0x{:x}\r\n\
         \tr12 0x{:x}\r\n\
         \tlr 0x{:x}\r\n\
         \tpc 0x{:x}\r\n\
         \txpsr 0x{:x}\r\n\
         ",
        option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
        hardfault_stacked_registers.r0,
        hardfault_stacked_registers.r1,
        hardfault_stacked_registers.r2,
        hardfault_stacked_registers.r3,
        hardfault_stacked_registers.r12,
        hardfault_stacked_registers.lr,
        hardfault_stacked_registers.pc,
        hardfault_stacked_registers.xpsr
    );
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn generic_isr() {
    unimplemented!()
}

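/// All peripheral interrupts are mapped to this single handler. If the
/// interrupt arrived from userspace, it first saves the process registers
/// and switches back to the kernel (MSP) stack. It then disables the
/// interrupt in the NVIC and re-pends it in ISPR, so the kernel can service
/// it later from `service_pending_interrupts()`.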
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn generic_isr() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    /* Skip saving process state if not coming from user-space */
    ldr r0, 300f // MEXC_RETURN_PSP
    cmp lr, r0
    bne 100f

    /* We need to make sure the kernel continues execution after this ISR */
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb

    /* We need the kernel's most recent value of r1, which points to the */
    /* Process struct's stored registers field. The kernel's r1 lives in */
    /* the second word of the hardware-stacked registers on the MSP. */
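    /* The stored registers area is 8 words: r8-r11 live in words 0-3 */
    /* and r4-r7 in words 4-7 (the #16-#28 offsets below). */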
    mov r1, sp
    ldr r1, [r1, #4]
    str r4, [r1, #16]
    str r5, [r1, #20]
    str r6, [r1, #24]
    str r7, [r1, #28]

    push {{r4-r7}}
    mov r4, r8
    mov r5, r9
    mov r6, r10
    mov r7, r11
    str r4, [r1, #0]
    str r5, [r1, #4]
    str r6, [r1, #8]
    str r7, [r1, #12]
    pop {{r4-r7}}

    ldr r0, 200f // MEXC_RETURN_MSP
    mov lr, r0
100: // _generic_isr_no_stacking
    /* Find the ISR number by looking at the low byte of the IPSR register */
    mrs r0, IPSR
    movs r1, #0xff
    ands r0, r1
    /* ISRs start at 16, so subtract 16 to get a zero-indexed IRQ number */
    subs r0, r0, #16
124
125 /*
126 * High level:
127 * NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
128 * */
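    /*
     * Worked example: for IRQ 37, r0 / 32 = 1 and 1 << (37 & 31) = 1 << 5,
     * so we write bit 5 of ICER[1], disabling exactly interrupt 37.
     */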
    /* r3 = &NVIC.ICER[r0 / 32] */
    ldr r2, 101f /* r2 = &NVIC.ICER */
    lsrs r3, r0, #5 /* r3 = r0 / 32 */
    lsls r3, r3, #2 /* ICER is word-sized, so multiply offset by 4 */
    adds r3, r3, r2 /* r3 = r2 + r3 */

    /* r2 = 1 << (r0 & 31) */
    movs r2, #31 /* r2 = 31 */
    ands r0, r2 /* r0 = r0 & r2 */
    subs r2, r2, #30 /* r2 = r2 - 30 i.e. r2 = 1 */
    lsls r2, r2, r0 /* r2 = 1 << r0 */

    /* *r3 = r2 */
    str r2, [r3]

    /* The pending bit in ISPR might be reset by hardware for pulse interrupts
     * at this point. So set it here again so the interrupt does not get lost
     * in service_pending_interrupts()
     *
     * The NVIC.ISPR base is 0xE000E200, which is 0x20 (aka #32) above the
     * NVIC.ICER base. Calculate the ISPR address by offsetting from the ICER
     * address so as to avoid re-doing the [r0 / 32] index math.
     */
    adds r3, #32
    str r2, [r3]

    bx lr /* Return here; the words below are data (a literal pool), not code */

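    /* ARMv6-M cannot encode these 32-bit constants as immediates, so the
     * `ldr`s above load them PC-relative from this literal pool.
     * 0xFFFFFFF9 is EXC_RETURN to thread mode on the main stack (MSP);
     * 0xFFFFFFFD is EXC_RETURN to thread mode on the process stack (PSP). */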
.align 4
101: // NVICICER
    .word 0xE000E180
200: // MEXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // MEXC_RETURN_PSP
    .word 0xFFFFFFFD",
    );
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn systick_handler() {
    unimplemented!()
}

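/// The systick handler fires when the running process has exhausted its time
/// slice. It switches thread mode back to privileged and returns to the
/// kernel on the main stack (MSP), resuming in `switch_to_user()` so the
/// scheduler can decide what to run next.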
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn systick_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Set thread mode to privileged to switch back to kernel mode.
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */
    isb

    ldr r0, 100f // ST_EXC_RETURN_MSP

    // This will resume in the switch-to-user function where application state
    // is saved and the scheduler can choose what to do next.
    bx r0
.align 4
100: // ST_EXC_RETURN_MSP
    .word 0xFFFFFFF9
    ",
    );
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn svc_handler() {
    unimplemented!()
}

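/// The SVC handler is entered both when the kernel issues an `svc` to switch
/// to a process (in `switch_to_user()`) and when a process issues an `svc`
/// to make a system call. The two cases are distinguished by comparing `lr`
/// against the MSP EXC_RETURN value: for a process system call it records
/// `SYSCALL_FIRED` and returns to the kernel stack.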
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn svc_handler() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    ldr r0, 200f // EXC_RETURN_MSP
    cmp lr, r0
    bne 100f
    movs r0, #1
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r1, 300f // EXC_RETURN_PSP
    bx r1

100: // to_kernel
    movs r0, #0
    msr CONTROL, r0
    /* CONTROL writes must be followed by ISB */
    /* https://developer.arm.com/documentation/dui0662/b/The-Cortex-M0--Processor/Programmers-model/Core-registers */
    isb
    ldr r0, =SYSCALL_FIRED
    movs r1, #1
    str r1, [r0, #0]
    ldr r1, 200f
    bx r1

.align 4
200: // EXC_RETURN_MSP
    .word 0xFFFFFFF9
300: // EXC_RETURN_PSP
    .word 0xFFFFFFFD
    ",
    );
}

#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
unsafe extern "C" fn hard_fault_handler() {
    unimplemented!()
}

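/// Determines whether the hard fault came from the kernel or from an app by
/// testing bit 2 of EXC_RETURN (i.e., which stack was active). Kernel faults
/// divert to `hard_fault_handler_kernel`, which panics; app faults record
/// `APP_HARD_FAULT`, snapshot the SCB registers into `SCB_REGISTERS`, and
/// return to the kernel on the main stack.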
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
#[unsafe(naked)]
unsafe extern "C" fn hard_fault_handler() {
    use core::arch::naked_asm;
    naked_asm!("
    /*
     * Will be incremented to 1 when we determine that it was a fault
     * in the kernel
     */
    movs r1, #0
    /*
     * r2 is used for the EXC_RETURN test and r3 holds a copy of lr
     */
    mov r3, lr

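    /* EXC_RETURN bit 2 selects the faulting stack: */
    /* 0 = main stack (kernel fault), 1 = process stack (app fault). */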
    movs r2, #4
    tst r3, r2
    beq 100f

// _hardfault_psp:
    mrs r0, psp
    b 200f

100: // _hardfault_msp
    mrs r0, msp
    adds r1, #1

200: // _hardfault_exit

    // If the hard fault occurred while executing the kernel (r1 != 0),
    // jump to the non-naked kernel hard fault handler. This handler
    // MUST NOT return. The faulting stack is passed as the first argument
    // (r0).
    cmp r1, #0 // Check if app (r1==0) or kernel (r1==1) fault.
    beq 400f // If app fault, skip to app handling.
    ldr r2, ={kernel_hard_fault_handler} // Load address of fault handler.
    bx r2 // Branch to the non-naked fault handler.

400: // _hardfault_app
    // Otherwise, store that a hardfault occurred in an app, store some CPU
    // state and finally return to the kernel stack:
    ldr r0, =APP_HARD_FAULT
    movs r1, #1 /* Fault */
    str r1, [r0, #0]

    /*
     * NOTE:
     * -----
     *
     * Even though ARMv6-M SCB and Control registers
     * are different from ARMv7-M, they are still compatible
     * with each other. So, we can keep the same code as
     * ARMv7-M.
     *
     * ARMv6-M however has no _privileged_ mode.
     */

    /* Read the SCB registers. */
    ldr r0, =SCB_REGISTERS
    ldr r1, =0xE000ED14
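    /* 0xE000ED14 is the address of the SCB CCR; the byte offsets below */
    /* reach CFSR (0xE000ED28), HFSR (0xE000ED2C), MMFAR (0xE000ED34) */
    /* and BFAR (0xE000ED38). */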
    ldr r2, [r1, #0] /* CCR */
    str r2, [r0, #0]
    ldr r2, [r1, #20] /* CFSR */
    str r2, [r0, #4]
    ldr r2, [r1, #24] /* HFSR */
    str r2, [r0, #8]
    ldr r2, [r1, #32] /* MMFAR */
    str r2, [r0, #12]
    ldr r2, [r1, #36] /* BFAR */
    str r2, [r0, #16]

    /* Set thread mode to privileged */
    movs r0, #0
    msr CONTROL, r0
    /* No ISB required on M0 */
    /* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html */

    // Load the FEXC_RETURN_MSP LR address and return to it, to switch to the
    // kernel (MSP) stack:
    ldr r0, 300f
    mov lr, r0
    bx lr

.align 4
300: // FEXC_RETURN_MSP
    .word 0xFFFFFFF9
    ",
    kernel_hard_fault_handler = sym hard_fault_handler_kernel,
    );
}

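/// Marker type selecting the ARMv6-M (Cortex-M0/M0+) variant of the shared
/// Cortex-M kernel entry points via `cortexm::CortexMVariant`.
///
/// A chip crate would typically plug these handlers into its vector table;
/// a minimal sketch (array shape and names assumed, not from this crate):
///
/// ```rust,ignore
/// use cortexm::CortexMVariant;
///
/// // Hypothetical two-entry IRQ table; a real table has one slot per IRQ.
/// static IRQS: [unsafe extern "C" fn(); 2] =
///     [cortexm0::CortexM0::GENERIC_ISR, cortexm0::CortexM0::GENERIC_ISR];
/// ```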
pub enum CortexM0 {}

impl cortexm::CortexMVariant for CortexM0 {
    const GENERIC_ISR: unsafe extern "C" fn() = generic_isr;
    const SYSTICK_HANDLER: unsafe extern "C" fn() = systick_handler;
    const SVC_HANDLER: unsafe extern "C" fn() = svc_handler;
    const HARD_FAULT_HANDLER: unsafe extern "C" fn() = hard_fault_handler;

    #[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
    unsafe fn switch_to_user(
        _user_stack: *const usize,
        _process_regs: &mut [usize; 8],
    ) -> *const usize {
        unimplemented!()
    }

    #[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
    unsafe fn switch_to_user(
        mut user_stack: *const usize,
        process_regs: &mut [usize; 8],
    ) -> *const usize {
        use core::arch::asm;
        asm!("
        // Rust's `asm!()` macro (as of May 2021) will not let us mark r6, r7,
        // and r9 as clobbers. r6 and r9 are used internally by LLVM, and r7 is
        // used for the frame pointer. However, in the process of restoring and
        // saving the process's registers, we do in fact clobber r6, r7 and r9.
        // So, we work around this by doing our own manual saving of r6 using
        // r2, r7 using r3, r9 using r12, and then mark those as clobbered.
        mov r2, r6
        mov r3, r7
        mov r12, r9

        /* Load non-hardware-stacked registers from the process stack */
        ldmia r1!, {{r4-r7}}
        mov r11, r7
        mov r10, r6
        mov r9, r5
        mov r8, r4
        ldmia r1!, {{r4-r7}}
        subs r1, 32 /* Restore the pointer to process_regs; */
                    /* the two ldmia!s above advanced it by 32 bytes */

        /* Load bottom of stack into Process Stack Pointer */
        msr psp, r0

        /* SWITCH */
        svc 0xff /* It doesn't matter which SVC number we use here */

        /* Store non-hardware-stacked registers in process_regs */
        /* r1 still points to process_regs because we are clobbering all */
        /* non-hardware-stacked registers */
        str r4, [r1, #16]
        str r5, [r1, #20]
        str r6, [r1, #24]
        str r7, [r1, #28]

        mov r4, r8
        mov r5, r9
        mov r6, r10
        mov r7, r11

        str r4, [r1, #0]
        str r5, [r1, #4]
        str r6, [r1, #8]
        str r7, [r1, #12]

        mrs r0, PSP /* PSP into user_stack */

        // Manually restore r6, r7 and r9.
        mov r6, r2
        mov r7, r3
        mov r9, r12

        ",
        inout("r0") user_stack,
        in("r1") process_regs,
        out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _,
        out("r10") _, out("r11") _, out("r12") _);

        user_stack
    }

    #[inline]
    unsafe fn print_cortexm_state(writer: &mut dyn Write) {
        cortexm::print_cortexm_state(writer)
    }
}