1#![no_std]
8
// Symbols provided by the linker script. Only their *addresses* are
// meaningful (taken via `addr_of!` in the fault handler below); the `u8`
// type is a placeholder.
//
// NOTE(review): if this crate moves to edition 2024 this must become
// `unsafe extern "C"` — confirm the crate edition.
extern "C" {
    // Top (highest address) of the kernel stack.
    static _estack: u8;
    // Bottom (lowest address) of the kernel stack.
    static _sstack: u8;
}
14
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// ARMv7-M SysTick exception handler.
///
/// This does not service the timer itself. It only forces the CPU back into
/// privileged thread mode on the main (kernel) stack, so that when the
/// exception returns, execution resumes in the kernel (inside
/// `switch_to_user_arm_v7m`), where app state is saved and the scheduler can
/// decide what to run next.
#[unsafe(naked)]
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    // 0 = Default stack (MSP) is used
    // 1 = Alternate stack is used
    // CONTROL[0]: Mode
    // 0 = Privileged in thread mode
    // 1 = User state in thread mode
    mov r0, #0 // r0 = 0
    msr CONTROL, r0 // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb // synchronization barrier

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1 // LR = LR & !(0x1<<2)

    // This will resume in the switch_to_user function where application state
    // is saved and the scheduler can choose what to do next.
    bx lr
    ",
    );
}
51
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// ARMv7-M SVC exception handler.
///
/// The SVC exception is used in both directions of a context switch, and the
/// direction is determined from the SPSEL bit of the `EXC_RETURN` value left
/// in `lr` on exception entry:
///
/// - SPSEL == 0: the kernel executed `svc` (in `switch_to_user_arm_v7m`) to
///   enter an app. Drop to unprivileged thread mode on the process stack and
///   return into the app.
/// - SPSEL == 1: an app executed `svc` to make a syscall. Record this in the
///   `SYSCALL_FIRED` global (defined in the syscall module) and return to
///   the kernel in privileged mode on the main stack.
#[unsafe(naked)]
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // First check to see which direction we are going in. If the link register
    // (containing EXC_RETURN) has a 1 in the SPSEL bit (meaning the
    // alternative/process stack was in use) then we are coming from a process
    // which has called a syscall.
    ubfx r0, lr, #2, #1 // r0 = (LR & (0x1<<2)) >> 2
    cmp r0, #0 // r0 (SPSEL bit) =≟ 0
    bne 100f // to_kernel // if SPSEL == 1, jump to to_kernel

    // If we get here, then this is a context switch from the kernel to the
    // application. Use the CONTROL register to set the thread mode to
    // unprivileged to run the application.
    //
    // CONTROL[1]: Stack status
    // 0 = Default stack (MSP) is used
    // 1 = Alternate stack is used
    // CONTROL[0]: Mode
    // 0 = Privileged in thread mode
    // 1 = User state in thread mode
    mov r0, #1 // r0 = 1
    msr CONTROL, r0 // CONTROL = 1
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we execute using the process stack we set the SPSEL bit to 1
    // to use the alternate (process) stack.
    orr lr, lr, #4 // LR = LR | 0b100

    // Switch to the app.
    bx lr

    100: // to_kernel
    // An application called a syscall. We mark this in the global variable
    // `SYSCALL_FIRED` which is stored in the syscall file.
    // `UserspaceKernelBoundary` will use this variable to decide why the app
    // stopped executing.
    ldr r0, =SYSCALL_FIRED // r0 = &SYSCALL_FIRED
    mov r1, #1 // r1 = 1
    str r1, [r0] // *SYSCALL_FIRED = 1

    // Use the CONTROL register to set the thread mode to privileged to switch
    // back to kernel mode.
    //
    // CONTROL[1]: Stack status
    // 0 = Default stack (MSP) is used
    // 1 = Alternate stack is used
    // CONTROL[0]: Mode
    // 0 = Privileged in thread mode
    // 1 = User state in thread mode
    mov r0, #0 // r0 = 0
    msr CONTROL, r0 // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1 // LR = LR & !(0x1<<2)

    // Return to the kernel.
    bx lr
    ",
    );
}
128
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// ARMv7-M generic peripheral interrupt handler.
///
/// Installed for every NVIC interrupt line. It does the minimum work in the
/// exception context: switch back to privileged mode on the kernel stack,
/// disable the firing interrupt in the NVIC (so it cannot re-fire before the
/// kernel services it), and re-set its pending bit (so pulse interrupts that
/// hardware may have already cleared are not lost). The kernel later handles
/// the interrupt in `service_pending_interrupts()`.
#[unsafe(naked)]
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    // Use the CONTROL register to set the thread mode to privileged to ensure
    // we are executing as the kernel. This may be redundant if the interrupt
    // happened while the kernel code was executing.
    //
    // CONTROL[1]: Stack status
    // 0 = Default stack (MSP) is used
    // 1 = Alternate stack is used
    // CONTROL[0]: Mode
    // 0 = Privileged in thread mode
    // 1 = User state in thread mode
    mov r0, #0 // r0 = 0
    msr CONTROL, r0 // CONTROL = 0
    // CONTROL writes must be followed by an Instruction Synchronization Barrier
    // (ISB). https://developer.arm.com/documentation/dai0321/latest
    isb

    // Now need to disable the interrupt that fired in the NVIC to ensure it
    // does not trigger again before the scheduler has a chance to handle it. We
    // do this here in assembly for performance.
    //
    // The general idea is:
    // 1. Get the index of the interrupt that occurred.
    // 2. Set the disable bit for that interrupt in the NVIC.

    // Find the ISR number (`index`) by looking at the low byte of the IPSR
    // registers.
    mrs r0, IPSR // r0 = Interrupt Program Status Register (IPSR)
    and r0, #0xff // r0 = r0 & 0xFF; Get lowest 8 bits
    sub r0, #16 // r0 = r0 - 16; ISRs start at 16, so subtract 16 to get zero-indexed.

    // Now disable that interrupt in the NVIC.
    // High level:
    //    r0 = index
    //    NVIC.ICER[r0 / 32] = 1 << (r0 & 31)
    lsrs r2, r0, #5 // r2 = r0 / 32
    // r0 = 1 << (r0 & 31)
    movs r3, #1 // r3 = 1
    and r0, r0, #31 // r0 = r0 & 31
    lsl r0, r3, r0 // r0 = r3 << r0

    // Load the ICER register address.
    ldr r3, =0xe000e180 // r3 = &NVIC.ICER

    // Here:
    // - `r2` is index / 32
    // - `r3` is &NVIC.ICER
    // - `r0` is 1 << (index & 31)
    str r0, [r3, r2, lsl #2] // *(r3 + r2 * 4) = r0

    // The pending bit in ISPR might be reset by hardware for pulse interrupts
    // at this point. So set it here again so the interrupt does not get lost in
    // `service_pending_interrupts()`.
    ldr r3, =0xe000e200 // r3 = &NVIC.ISPR
    str r0, [r3, r2, lsl #2] // *(r3 + r2 * 4) = r0

    // The link register is set to the `EXC_RETURN` value on exception entry. To
    // ensure we continue executing in the kernel we ensure the SPSEL bit is set
    // to 0 to use the main (kernel) stack.
    bfc lr, #2, #1 // LR = LR & !(0x1<<2)

    // Now we can return from the interrupt context and resume what we were
    // doing. If an app was executing we will switch to the kernel so it can
    // choose whether to service the interrupt.
    bx lr
    ",
    );
}
205
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Switch to an application on ARMv7-M and return when control comes back to
/// the kernel.
///
/// - `user_stack`: app stack pointer to load into PSP before entering the
///   app.
/// - `process_regs`: storage for the eight registers (r4-r11) that the
///   hardware does not stack automatically on exception entry; they are
///   restored from here before entering the app and written back here after
///   the app stops.
///
/// Returns the app's stack pointer (PSP) as observed when execution returned
/// to the kernel.
///
/// # Safety
///
/// NOTE(review): `user_stack` must point to a valid, correctly prepared
/// process stack and `process_regs` must hold a valid r4-r11 snapshot; the
/// full contract lives with `UserspaceKernelBoundary` — not visible in this
/// file.
pub unsafe fn switch_to_user_arm_v7m(
    mut user_stack: *const usize,
    process_regs: &mut [usize; 8],
) -> *const usize {
    use core::arch::asm;
    asm!(
        "
    // Rust `asm!()` macro (as of May 2021) will not let us mark r6, r7 and r9
    // as clobbers. r6 and r9 is used internally by LLVM, and r7 is used for
    // the frame pointer. However, in the process of restoring and saving the
    // process's registers, we do in fact clobber r6, r7 and r9. So, we work
    // around this by doing our own manual saving of r6 using r2, r7 using r3,
    // r9 using r12, and then mark those as clobbered.
    mov r2, r6 // r2 = r6
    mov r3, r7 // r3 = r7
    mov r12, r9 // r12 = r9

    // The arguments passed in are:
    // - `r0` is the bottom of the user stack
    // - `r1` is a reference to `CortexMStoredState.regs`

    // Load bottom of stack into Process Stack Pointer.
    msr psp, r0 // PSP = r0

    // Load non-hardware-stacked registers from the process stored state. Ensure
    // that the address register (right now r1) is stored in a callee saved
    // register.
    ldmia r1, {{r4-r11}} // r4 = r1[0], r5 = r1[1], ...

    // Generate a SVC exception to handle the context switch from kernel to
    // userspace. It doesn't matter which SVC number we use here as it is not
    // used in the exception handler. Data being returned from a syscall is
    // transferred on the app's stack.
    svc 0xff

    // When execution returns here we have switched back to the kernel from the
    // application.

    // Push non-hardware-stacked registers into the saved state for the
    // application.
    stmia r1, {{r4-r11}} // r1[0] = r4, r1[1] = r5, ...

    // Update the user stack pointer with the current value after the
    // application has executed.
    mrs r0, PSP // r0 = PSP

    // Need to restore r6, r7 and r12 since we clobbered them when switching to
    // and from the app.
    mov r6, r2 // r6 = r2
    mov r7, r3 // r7 = r3
    mov r9, r12 // r9 = r12
    ",
        inout("r0") user_stack,
        in("r1") process_regs,
        out("r2") _, out("r3") _, out("r4") _, out("r5") _, out("r8") _, out("r10") _,
        out("r11") _, out("r12") _);

    user_stack
}
271
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// Continuation of the HardFault handler for faults taken while the kernel
/// (privileged mode, main stack) was executing. Diverges by panicking with a
/// detailed fault report.
///
/// - `faulting_stack`: pointer to the exception frame the hardware stacked
///   (r0-r3, r12, lr, pc, xPSR); only dereferenced when `stack_overflow` is
///   zero.
/// - `stack_overflow`: nonzero when the first-stage assembly handler
///   detected that the hardware faulted while stacking the frame (kernel
///   stack overflow); in that case the frame is unusable so only a short
///   message is reported.
unsafe extern "C" fn hard_fault_handler_arm_v7m_kernel(
    faulting_stack: *mut u32,
    stack_overflow: u32,
) -> ! {
    if stack_overflow != 0 {
        // Stacking itself failed: nothing on the stack can be trusted, so
        // keep the report minimal.
        panic!("kernel stack overflow");
    } else {
        // Hardware-stacked exception frame, in architectural order.
        let stacked_r0: u32 = *faulting_stack.offset(0);
        let stacked_r1: u32 = *faulting_stack.offset(1);
        let stacked_r2: u32 = *faulting_stack.offset(2);
        let stacked_r3: u32 = *faulting_stack.offset(3);
        let stacked_r12: u32 = *faulting_stack.offset(4);
        let stacked_lr: u32 = *faulting_stack.offset(5);
        let stacked_pc: u32 = *faulting_stack.offset(6);
        let stacked_xpsr: u32 = *faulting_stack.offset(7);

        let mode_str = "Kernel";

        // System Control Block fault status registers.
        let shcsr: u32 = core::ptr::read_volatile(0xE000ED24 as *const u32);
        let cfsr: u32 = core::ptr::read_volatile(0xE000ED28 as *const u32);
        let hfsr: u32 = core::ptr::read_volatile(0xE000ED2C as *const u32);
        let mmfar: u32 = core::ptr::read_volatile(0xE000ED34 as *const u32);
        let bfar: u32 = core::ptr::read_volatile(0xE000ED38 as *const u32);

        // MemManage fault status: CFSR[7:0].
        let iaccviol = (cfsr & 0x01) == 0x01;
        let daccviol = (cfsr & 0x02) == 0x02;
        let munstkerr = (cfsr & 0x08) == 0x08;
        let mstkerr = (cfsr & 0x10) == 0x10;
        let mlsperr = (cfsr & 0x20) == 0x20;
        let mmfarvalid = (cfsr & 0x80) == 0x80;

        // BusFault status: CFSR[15:8].
        let ibuserr = ((cfsr >> 8) & 0x01) == 0x01;
        let preciserr = ((cfsr >> 8) & 0x02) == 0x02;
        let impreciserr = ((cfsr >> 8) & 0x04) == 0x04;
        let unstkerr = ((cfsr >> 8) & 0x08) == 0x08;
        let stkerr = ((cfsr >> 8) & 0x10) == 0x10;
        let lsperr = ((cfsr >> 8) & 0x20) == 0x20;
        let bfarvalid = ((cfsr >> 8) & 0x80) == 0x80;

        // UsageFault status: CFSR[31:16].
        let undefinstr = ((cfsr >> 16) & 0x01) == 0x01;
        let invstate = ((cfsr >> 16) & 0x02) == 0x02;
        let invpc = ((cfsr >> 16) & 0x04) == 0x04;
        let nocp = ((cfsr >> 16) & 0x08) == 0x08;
        let unaligned = ((cfsr >> 16) & 0x100) == 0x100;
        let divbyzero = ((cfsr >> 16) & 0x200) == 0x200;

        // HardFault status.
        let vecttbl = (hfsr & 0x02) == 0x02;
        let forced = (hfsr & 0x40000000) == 0x40000000;

        // Decode the stacked xPSR: ICI/IT bits, Thumb bit, exception number.
        let ici_it = (((stacked_xpsr >> 25) & 0x3) << 6) | ((stacked_xpsr >> 10) & 0x3f);
        let thumb_bit = ((stacked_xpsr >> 24) & 0x1) == 1;
        let exception_number = (stacked_xpsr & 0x1ff) as usize;

        panic!(
            "{} HardFault.\r\n\
             \tKernel version {}\r\n\
             \tr0  0x{:x}\r\n\
             \tr1  0x{:x}\r\n\
             \tr2  0x{:x}\r\n\
             \tr3  0x{:x}\r\n\
             \tr12 0x{:x}\r\n\
             \tlr  0x{:x}\r\n\
             \tpc  0x{:x}\r\n\
             \tpsr 0x{:x} [ N {} Z {} C {} V {} Q {} GE {}{}{}{} ; ICI.IT {} T {} ; Exc {}-{} ]\r\n\
             \tsp 0x{:x}\r\n\
             \ttop of stack     0x{:x}\r\n\
             \tbottom of stack  0x{:x}\r\n\
             \tSHCSR 0x{:x}\r\n\
             \tCFSR  0x{:x}\r\n\
             \tHFSR  0x{:x}\r\n\
             \tInstruction Access Violation:       {}\r\n\
             \tData Access Violation:              {}\r\n\
             \tMemory Management Unstacking Fault: {}\r\n\
             \tMemory Management Stacking Fault:   {}\r\n\
             \tMemory Management Lazy FP Fault:    {}\r\n\
             \tInstruction Bus Error:              {}\r\n\
             \tPrecise Data Bus Error:             {}\r\n\
             \tImprecise Data Bus Error:           {}\r\n\
             \tBus Unstacking Fault:               {}\r\n\
             \tBus Stacking Fault:                 {}\r\n\
             \tBus Lazy FP Fault:                  {}\r\n\
             \tUndefined Instruction Usage Fault:  {}\r\n\
             \tInvalid State Usage Fault:          {}\r\n\
             \tInvalid PC Load Usage Fault:        {}\r\n\
             \tNo Coprocessor Usage Fault:         {}\r\n\
             \tUnaligned Access Usage Fault:       {}\r\n\
             \tDivide By Zero:                     {}\r\n\
             \tBus Fault on Vector Table Read:     {}\r\n\
             \tForced Hard Fault:                  {}\r\n\
             \tFaulting Memory Address: (valid: {}) {:#010X}\r\n\
             \tBus Fault Address:       (valid: {}) {:#010X}\r\n\
             ",
            mode_str,
            option_env!("TOCK_KERNEL_VERSION").unwrap_or("unknown"),
            stacked_r0,
            stacked_r1,
            stacked_r2,
            stacked_r3,
            stacked_r12,
            stacked_lr,
            stacked_pc,
            stacked_xpsr,
            (stacked_xpsr >> 31) & 0x1,
            (stacked_xpsr >> 30) & 0x1,
            (stacked_xpsr >> 29) & 0x1,
            (stacked_xpsr >> 28) & 0x1,
            (stacked_xpsr >> 27) & 0x1,
            (stacked_xpsr >> 19) & 0x1,
            (stacked_xpsr >> 18) & 0x1,
            (stacked_xpsr >> 17) & 0x1,
            (stacked_xpsr >> 16) & 0x1,
            ici_it,
            thumb_bit,
            exception_number,
            ipsr_isr_number_to_str(exception_number),
            faulting_stack as u32,
            core::ptr::addr_of!(_estack) as u32,
            core::ptr::addr_of!(_sstack) as u32,
            shcsr,
            cfsr,
            hfsr,
            iaccviol,
            daccviol,
            munstkerr,
            mstkerr,
            mlsperr,
            ibuserr,
            preciserr,
            impreciserr,
            unstkerr,
            stkerr,
            lsperr,
            undefinstr,
            invstate,
            invpc,
            nocp,
            unaligned,
            divbyzero,
            vecttbl,
            forced,
            mmfarvalid,
            mmfar,
            bfarvalid,
            bfar
        );
    }
}
424
#[cfg(any(doc, all(target_arch = "arm", target_os = "none")))]
/// First-stage HardFault handler for ARMv7-M.
///
/// Determines whether the fault occurred in the kernel (MSP in use, per the
/// EXC_RETURN value in `lr`) or in an app (PSP in use). For kernel faults it
/// additionally checks the BFSR stacking-error bits to detect a kernel stack
/// overflow — in that case SP is reset to `_estack` so the panic machinery
/// has a usable stack — and then branches to
/// `hard_fault_handler_arm_v7m_kernel`, which never returns. For app faults
/// it snapshots several SCB registers into the `SCB_REGISTERS` global, sets
/// `APP_HARD_FAULT` (both defined elsewhere), and returns to the kernel in
/// privileged mode on the main stack.
#[unsafe(naked)]
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    use core::arch::naked_asm;
    naked_asm!(
        "
    mov r2, 0 // r2 = 0
    tst lr, #4 // bitwise AND link register to 0b100
    itte eq // if lr==4, run next two instructions, else, run 3rd instruction.
    mrseq r0, msp // r0 = kernel stack pointer
    addeq r2, 1 // r2 = 1, kernel was executing
    mrsne r0, psp // r0 = userland stack pointer
    // Need to determine if we had a stack overflow before we push anything
    // on to the stack. We check this by looking at the BusFault Status
    // Register's (BFSR) `LSPERR` and `STKERR` bits to see if the hardware
    // had any trouble stacking important registers to the stack during the
    // fault. If so, then we cannot use this stack while handling this fault
    // or we will trigger another fault.
    ldr r3, =0xE000ED29 // SCB BFSR register address
    ldrb r3, [r3] // r3 = BFSR
    tst r3, #0x30 // r3 = BFSR & 0b00110000; LSPERR & STKERR bits
    ite ne // check if the result of that bitwise AND was not 0
    movne r1, #1 // BFSR & 0b00110000 != 0; r1 = 1
    moveq r1, #0 // BFSR & 0b00110000 == 0; r1 = 0
    and r5, r2, r1 // bitwise and r1 and r2, store in r5
    cmp r5, #1 // update condition codes to reflect if r1 == 1 && r2 == 1
    itt eq // if r5==1 run the next 2 instructions, else skip to branch
    // if true, The hardware couldn't use the stack, so we have no saved data and
    // we cannot use the kernel stack as is. We just want to report that
    // the kernel's stack overflowed, since that is essential for
    // debugging.
    //
    // To make room for a panic!() handler stack, we just re-use the
    // kernel's original stack. This should in theory leave the bottom
    // of the stack where the problem occurred untouched should one want
    // to further debug.
    ldreq r4, ={estack} // load _estack into r4
    moveq sp, r4 // Set the stack pointer to _estack
    // finally, if the fault occurred in privileged mode (r2 == 1), branch
    // to non-naked handler.
    cmp r2, #0
    // Per ARM calling convention, faulting stack is passed in r0, whether
    // there was a stack overflow in r1. This function must never return.
    bne {kernel_hard_fault_handler} // branch to kernel hard fault handler
    // Otherwise, the hard fault occurred in userspace. In this case, read
    // the relevant SCB registers:
    ldr r0, =SCB_REGISTERS // Global variable address
    ldr r1, =0xE000ED14 // SCB CCR register address
    ldr r2, [r1, #0] // CCR
    str r2, [r0, #0]
    ldr r2, [r1, #20] // CFSR
    str r2, [r0, #4]
    ldr r2, [r1, #24] // HFSR
    str r2, [r0, #8]
    ldr r2, [r1, #32] // MMFAR
    str r2, [r0, #12]
    ldr r2, [r1, #36] // BFAR
    str r2, [r0, #16]

    ldr r0, =APP_HARD_FAULT // Global variable address
    mov r1, #1 // r1 = 1
    str r1, [r0, #0] // APP_HARD_FAULT = 1

    // Set thread mode to privileged
    mov r0, #0
    msr CONTROL, r0
    // CONTROL writes must be followed by ISB
    // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHFJCAC.html
    isb

    // The link register is set to the `EXC_RETURN` value on exception
    // entry. To ensure we continue executing in the kernel we ensure the
    // SPSEL bit is set to 0 to use the main (kernel) stack.
    bfc lr, #2, #1 // LR = LR & !(0x1<<2)

    bx lr",
        estack = sym _estack,
        kernel_hard_fault_handler = sym hard_fault_handler_arm_v7m_kernel,
    );
}
514
/// Translate an exception number (as read from the low bits of the IPSR
/// register) into a human-readable name for fault reports.
///
/// Numbers 0-15 are the architecturally defined ARMv7-M system exceptions;
/// 16 through 255 are external interrupt lines (IRQn). Anything above 255
/// cannot appear in the 9-bit IPSR field and is reported as illegal.
pub fn ipsr_isr_number_to_str(isr_number: usize) -> &'static str {
    match isr_number {
        16..=255 => "IRQn",
        0 => "Thread Mode",
        2 => "NMI",
        3 => "HardFault",
        4 => "MemManage",
        5 => "BusFault",
        6 => "UsageFault",
        11 => "SVCall",
        12 => "Reserved for Debug",
        14 => "PendSV",
        15 => "SysTick",
        1 | 7..=10 | 13 => "Reserved",
        _ => "(Unknown! Illegal value?)",
    }
}
536
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build (non-ARM) stand-in for the SysTick handler so the crate links
/// for tests; panics if ever invoked.
pub unsafe extern "C" fn systick_handler_arm_v7m() {
    unimplemented!()
}
548
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build (non-ARM) stand-in for the SVC handler so the crate links for
/// tests; panics if ever invoked.
pub unsafe extern "C" fn svc_handler_arm_v7m() {
    unimplemented!()
}
553
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build (non-ARM) stand-in for the generic interrupt handler so the
/// crate links for tests; panics if ever invoked.
pub unsafe extern "C" fn generic_isr_arm_v7m() {
    unimplemented!()
}
558
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build (non-ARM) stand-in for the context switch so the crate links
/// for tests; panics if ever invoked.
///
/// NOTE(review): the stack parameter is `*const u8` here but `*const usize`
/// in the real ARM implementation above, and this stub is `extern "C"`
/// while the real one is not — confirm whether these signatures should be
/// unified.
pub unsafe extern "C" fn switch_to_user_arm_v7m(
    _user_stack: *const u8,
    _process_regs: &mut [usize; 8],
) -> *const usize {
    unimplemented!()
}
566
#[cfg(not(any(doc, all(target_arch = "arm", target_os = "none"))))]
/// Host-build (non-ARM) stand-in for the HardFault handler so the crate
/// links for tests; panics if ever invoked.
pub unsafe extern "C" fn hard_fault_handler_arm_v7m() {
    unimplemented!()
}