use core::fmt::Write;
use core::mem::size_of;
use core::ops::Range;

use crate::csr::mcause;
use kernel::errorcode::ErrorCode;
use kernel::syscall::ContextSwitchReason;

/// Stored state of a process when it is not executing: the 31
/// general-purpose registers x1-x31 (x0 is hardwired to zero and not
/// stored), the program counter, and the mcause/mtval CSR values
/// captured on the most recent trap.
#[derive(Default)]
#[repr(C)]
pub struct Riscv32iStoredState {
    /// Registers x1-x31.
    regs: [u32; 31],

    /// The PC at which to resume the process.
    pc: u32,

    /// mcause CSR value from the last trap taken while the app ran.
    mcause: u32,

    /// mtval CSR value from the last trap (e.g., a faulting address).
    mtval: u32,
}

// Offsets of relevant registers within `regs`, which stores x1-x31 (so
// register x(n) lives at index n - 1).
const R_RA: usize = 0;
const R_SP: usize = 1;
const R_A0: usize = 9;
const R_A1: usize = 10;
const R_A2: usize = 11;
const R_A3: usize = 12;
const R_A4: usize = 13;

const VERSION: u32 = 1;
const STORED_STATE_SIZE: u32 = size_of::<Riscv32iStoredState>() as u32;
const TAG: [u8; 4] = [b'r', b'v', b'5', b'i'];
const METADATA_LEN: usize = 3;

const VERSION_IDX: usize = 0;
const SIZE_IDX: usize = 1;
const TAG_IDX: usize = 2;
const PC_IDX: usize = 3;
const MCAUSE_IDX: usize = 4;
const MTVAL_IDX: usize = 5;
const REGS_IDX: usize = 6;
const REGS_RANGE: Range<usize> = REGS_IDX..REGS_IDX + 31;
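
// Serialized layout of the stored state, as little-endian u32 words:
//
//   word 0:       VERSION
//   word 1:       STORED_STATE_SIZE
//   word 2:       TAG ("rv5i")
//   word 3:       pc
//   word 4:       mcause
//   word 5:       mtval
//   words 6..37:  regs[0..31]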

const U32_SZ: usize = size_of::<u32>();

fn u32_byte_range(index: usize) -> Range<usize> {
    index * U32_SZ..(index + 1) * U32_SZ
}

fn u32_from_u8_slice(slice: &[u8], index: usize) -> Result<u32, ErrorCode> {
    let range = u32_byte_range(index);
    Ok(u32::from_le_bytes(
        slice
            .get(range)
            .ok_or(ErrorCode::SIZE)?
            .try_into()
            .or(Err(ErrorCode::FAIL))?,
    ))
}

fn write_u32_to_u8_slice(val: u32, slice: &mut [u8], index: usize) {
    let range = u32_byte_range(index);
    slice[range].copy_from_slice(&val.to_le_bytes());
}

impl core::convert::TryFrom<&[u8]> for Riscv32iStoredState {
    type Error = ErrorCode;
    fn try_from(ss: &[u8]) -> Result<Riscv32iStoredState, Self::Error> {
        if ss.len() == size_of::<Riscv32iStoredState>() + METADATA_LEN * U32_SZ
            && u32_from_u8_slice(ss, VERSION_IDX)? == VERSION
            && u32_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE
            && u32_from_u8_slice(ss, TAG_IDX)? == u32::from_le_bytes(TAG)
        {
            let mut res = Riscv32iStoredState {
                regs: [0; 31],
                pc: u32_from_u8_slice(ss, PC_IDX)?,
                mcause: u32_from_u8_slice(ss, MCAUSE_IDX)?,
                mtval: u32_from_u8_slice(ss, MTVAL_IDX)?,
            };
            for (i, v) in REGS_RANGE.enumerate() {
                res.regs[i] = u32_from_u8_slice(ss, v)?;
            }
            Ok(res)
        } else {
            Err(ErrorCode::FAIL)
        }
    }
}

/// The RV32I implementation of the userspace/kernel boundary.
pub struct SysCall(());

impl SysCall {
    pub const unsafe fn new() -> SysCall {
        SysCall(())
    }
}

impl kernel::syscall::UserspaceKernelBoundary for SysCall {
    type StoredState = Riscv32iStoredState;

    fn initial_process_app_brk_size(&self) -> usize {
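        // All process state is stored in the kernel-owned
        // `Riscv32iStoredState` struct, so processes do not need any
        // accessible memory to start.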
        0
    }

    unsafe fn initialize_process(
        &self,
        accessible_memory_start: *const u8,
        _app_brk: *const u8,
        state: &mut Self::StoredState,
    ) -> Result<(), ()> {
        // Clear all registers and trap state from any previous use of
        // this stored state.
        state.regs.iter_mut().for_each(|x| *x = 0);
        state.pc = 0;
        state.mcause = 0;

        // Set the app's initial stack pointer to the start of its
        // accessible memory region.
        state.regs[R_SP] = accessible_memory_start as u32;

        Ok(())
    }

    unsafe fn set_syscall_return_value(
        &self,
        _accessible_memory_start: *const u8,
        _app_brk: *const u8,
        state: &mut Self::StoredState,
        return_value: kernel::syscall::SyscallReturn,
    ) -> Result<(), ()> {
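        // Encoding the return value requires mutable references to
        // registers a0-a3 simultaneously, which a single slice cannot
        // provide. Split `regs` into disjoint sub-slices, each beginning
        // at one of the registers of interest.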
        let (_, r) = state.regs.split_at_mut(R_A0);
        let (a0slice, r) = r.split_at_mut(R_A1 - R_A0);
        let (a1slice, r) = r.split_at_mut(R_A2 - R_A1);
        let (a2slice, a3slice) = r.split_at_mut(R_A3 - R_A2);

        kernel::utilities::arch_helpers::encode_syscall_return_trd104(
            &kernel::utilities::arch_helpers::TRD104SyscallReturn::from_syscall_return(
                return_value,
            ),
            &mut a0slice[0],
            &mut a1slice[0],
            &mut a2slice[0],
            &mut a3slice[0],
        );

        Ok(())
    }

    unsafe fn set_process_function(
        &self,
        _accessible_memory_start: *const u8,
        _app_brk: *const u8,
        state: &mut Riscv32iStoredState,
        callback: kernel::process::FunctionCall,
    ) -> Result<(), ()> {
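        // Pass the callback's four arguments in a0-a3, per the RISC-V
        // calling convention.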
        state.regs[R_A0] = callback.argument0 as u32;
        state.regs[R_A1] = callback.argument1 as u32;
        state.regs[R_A2] = callback.argument2 as u32;
        state.regs[R_A3] = callback.argument3.as_usize() as u32;
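        // Set the return address to the app's current PC, so that when
        // the callback function returns, the app resumes where it left
        // off.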
        state.regs[R_RA] = state.pc;
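        // Start executing at the callback function.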
        state.pc = callback.pc.addr() as u32;

        Ok(())
    }

    // When not building for a riscv32 bare-metal target (e.g., for host
    // tests or documentation of other architectures), provide a stub
    // that cannot context switch but keeps this module compiling.
    #[cfg(not(any(doc, all(target_arch = "riscv32", target_os = "none"))))]
    unsafe fn switch_to_process(
        &self,
        _accessible_memory_start: *const u8,
        _app_brk: *const u8,
        _state: &mut Riscv32iStoredState,
    ) -> (ContextSwitchReason, Option<*const u8>) {
        // Reference `mcause` and `R_A4` so they count as used on every
        // build target.
        let _cause = mcause::Trap::from(_state.mcause as usize);
        let _arg4 = _state.regs[R_A4];
        unimplemented!()
    }

    #[cfg(any(doc, all(target_arch = "riscv32", target_os = "none")))]
    unsafe fn switch_to_process(
        &self,
        _accessible_memory_start: *const u8,
        _app_brk: *const u8,
        state: &mut Riscv32iStoredState,
    ) -> (ContextSwitchReason, Option<*const u8>) {
        use core::arch::asm;
        asm!("
            // Before switching to the app we need to save some kernel
            // registers to the kernel stack, specifically ones which we can't
            // mark as clobbered in the asm!() block. We then save the stack
            // pointer in the mscratch CSR so we can retrieve it after
            // returning to the kernel from the app.
            //
            // A few values get saved to the kernel stack, including an app
            // register temporarily after entering the trap handler. Here is a
            // memory map to make it easier to keep track:
            //
            // ```
            // 8*4(sp): <- original stack pointer
            // 7*4(sp):
            // 6*4(sp): x9 / s1
            // 5*4(sp): x8 / s0 / fp
            // 4*4(sp): x4 / tp
            // 3*4(sp): x3 / gp
            // 2*4(sp): x10 / a0 (*state, per-process stored state struct)
            // 1*4(sp): custom trap handler address
            // 0*4(sp): scratch space, s1 written to by the trap handler
            //          <- new stack pointer
            // ```

            addi sp, sp, -8*4 // Move the stack pointer down to make room.

            // Save all registers on the kernel stack which cannot be clobbered
            // by an asm!() block. These are mostly registers which have a
            // designated purpose (e.g. stack pointer) or are used internally
            // by LLVM.
            sw x9, 6*4(sp)    // s1 (used internally by LLVM)
            sw x8, 5*4(sp)    // fp (can't be clobbered / used as an operand)
            sw x4, 4*4(sp)    // tp (can't be clobbered / used as an operand)
            sw x3, 3*4(sp)    // gp (can't be clobbered / used as an operand)

            sw x10, 2*4(sp)   // Store process state pointer on stack as well.
                              // We need to have this available for after the
                              // app returns to the kernel so we can store its
                              // registers.

            // Load the address of `_start_app_trap` into `1*4(sp)`. We swap
            // our stack pointer into the mscratch CSR and the trap handler
            // will load and jump to the address at this offset.
            la t0, 100f       // t0 = _start_app_trap
            sw t0, 1*4(sp)    // 1*4(sp) = t0

            // sw x0, 0*4(sp) // Reserved as scratch space for the trap handler

            // -----> All required registers saved to the stack.
            //        sp holds the updated stack pointer, a0 the per-process
            //        state.

            // From here on we can't allow the CPU to take interrupts anymore,
            // as we re-route traps to `_start_app_trap` below (by writing our
            // stack pointer into the mscratch CSR), and we rely on certain
            // CSRs to not be modified or used in their intermediate states
            // (e.g., mepc).
            //
            // We atomically switch to user-mode and re-enable interrupts
            // using the `mret` instruction below.
            //
            // If interrupts are disabled _after_ setting mscratch, this
            // results in the race condition of
            // [PR 2308](https://github.com/tock/tock/pull/2308)

            // Therefore, clear the following bits in mstatus first:
            //   0x00000008 -> bit 3      -> MIE (disabling interrupts here)
            // + 0x00001800 -> bits 11,12 -> MPP (switch to usermode on mret)
            li t0, 0x00001808
            csrc mstatus, t0  // clear bits in mstatus

            // Afterwards, set the following bits in mstatus:
            //   0x00000080 -> bit 7 -> MPIE (enable interrupts on mret)
            li t0, 0x00000080
            csrs mstatus, t0  // set bits in mstatus

            // Execute `_start_app_trap` on a trap by setting the mscratch trap
            // handler address to our current stack pointer. This stack
            // pointer, at `1*4(sp)`, holds the address of `_start_app_trap`.
            //
            // Upon a trap, the global trap handler (_start_trap) will swap
            // `s0` with the `mscratch` CSR and, if it contains a non-zero
            // address, jump to the address that is now at `1*4(s0)`. This
            // allows us to hook a custom trap handler that saves all
            // userspace state:
            //
            csrw mscratch, sp // Store `sp` in mscratch CSR. Discard the prior
                              // value; it must have been set to zero.

            // We have to set the mepc CSR with the PC we want the app to start
            // executing at. This has been saved in Riscv32iStoredState for us
            // (either when the app returned back to the kernel or in the
            // `set_process_function()` function).
            lw t0, 31*4(a0)   // Retrieve the PC from Riscv32iStoredState
            csrw mepc, t0     // Set mepc CSR to the app's PC.

            // Restore all of the app registers from what we saved. If this is
            // the first time running the app then most of these values are
            // irrelevant. However, we do need to set the four arguments to the
            // `_start` function in the app. If the app has been executing then
            // this allows the app to correctly resume.

            // We do a little switcheroo here, and place the per-process stored
            // state pointer into the `sp` register instead of `a0`. Doing so
            // allows us to use compressed instructions for all of these loads:
            mv sp, a0         // sp <- a0 (per-process stored state)

            lw x1,  0*4(sp)  // ra
            // ------------------------> sp, do last since we overwrite our
            //                           pointer
            lw x3,  2*4(sp)  // gp
            lw x4,  3*4(sp)  // tp
            lw x5,  4*4(sp)  // t0
            lw x6,  5*4(sp)  // t1
            lw x7,  6*4(sp)  // t2
            lw x8,  7*4(sp)  // s0,fp
            lw x9,  8*4(sp)  // s1
            lw x10, 9*4(sp)  // a0
            lw x11, 10*4(sp) // a1
            lw x12, 11*4(sp) // a2
            lw x13, 12*4(sp) // a3
            lw x14, 13*4(sp) // a4
            lw x15, 14*4(sp) // a5
            lw x16, 15*4(sp) // a6
            lw x17, 16*4(sp) // a7
            lw x18, 17*4(sp) // s2
            lw x19, 18*4(sp) // s3
            lw x20, 19*4(sp) // s4
            lw x21, 20*4(sp) // s5
            lw x22, 21*4(sp) // s6
            lw x23, 22*4(sp) // s7
            lw x24, 23*4(sp) // s8
            lw x25, 24*4(sp) // s9
            lw x26, 25*4(sp) // s10
            lw x27, 26*4(sp) // s11
            lw x28, 27*4(sp) // t3
            lw x29, 28*4(sp) // t4
            lw x30, 29*4(sp) // t5
            lw x31, 30*4(sp) // t6
            lw x2,  1*4(sp)  // sp, overwriting our pointer

            // Call mret to jump to where mepc points, switch to user mode, and
            // start running the app.
            mret

            // The global trap handler will jump to this address when catching
            // a trap while the app is executing (address loaded into the
            // mscratch CSR).
            //
            // This custom trap handler is responsible for saving application
            // state, clearing the custom trap handler (mscratch = 0), and
            // restoring the kernel context.
            100: // _start_app_trap

            // At this point all we know is that we entered the trap handler
            // from an app. We don't know _why_ we got a trap; it could be from
            // an interrupt, syscall, or fault (or maybe something else).
            // Therefore we have to be very careful not to overwrite any
            // registers before we have saved them.
            //
            // The global trap handler has swapped the app's `s0` into the
            // mscratch CSR; `s0` now holds the address of our kernel stack.
            // The global trap handler further clobbered `s1`, which now
            // contains the address of `_start_app_trap`. The app's `s1` is
            // saved at `0*4(s0)`.
            //
            // Thus we can clobber `s1` and load the address of the per-process
            // stored state:
            //
            lw s1, 2*4(s0)

            // With the per-process stored state address in `s1`, save all
            // non-clobbered registers. Save the `sp` first, then do the same
            // switcheroo as above, moving the per-process stored state pointer
            // into `sp`. This allows us to use compressed instructions for all
            // these stores:
            sw x2, 1*4(s1)   // Save app's sp
            mv sp, s1        // sp <- s1 (per-process stored state)

            // Now, store relative to `sp` (per-process stored state) with
            // compressed instructions:
            sw x1,  0*4(sp)  // ra
            // ------------------------> sp, saved above
            sw x3,  2*4(sp)  // gp
            sw x4,  3*4(sp)  // tp
            sw x5,  4*4(sp)  // t0
            sw x6,  5*4(sp)  // t1
            sw x7,  6*4(sp)  // t2
            // ------------------------> s0, in mscratch right now
            // ------------------------> s1, stored at 0*4(s0) right now
            sw x10, 9*4(sp)  // a0
            sw x11, 10*4(sp) // a1
            sw x12, 11*4(sp) // a2
            sw x13, 12*4(sp) // a3
            sw x14, 13*4(sp) // a4
            sw x15, 14*4(sp) // a5
            sw x16, 15*4(sp) // a6
            sw x17, 16*4(sp) // a7
            sw x18, 17*4(sp) // s2
            sw x19, 18*4(sp) // s3
            sw x20, 19*4(sp) // s4
            sw x21, 20*4(sp) // s5
            sw x22, 21*4(sp) // s6
            sw x23, 22*4(sp) // s7
            sw x24, 23*4(sp) // s8
            sw x25, 24*4(sp) // s9
            sw x26, 25*4(sp) // s10
            sw x27, 26*4(sp) // s11
            sw x28, 27*4(sp) // t3
            sw x29, 28*4(sp) // t4
            sw x30, 29*4(sp) // t5
            sw x31, 30*4(sp) // t6

            // At this point, we can restore s0 into our stack pointer:
            mv sp, s0

            // Now retrieve the original value of s1 and save that as well. We
            // must not clobber s1, our per-process stored state pointer.
            lw s0, 0*4(sp)   // s0 = app s1 (from trap handler scratch space)
            sw s0, 8*4(s1)   // Save app s1 to per-process state

            // Retrieve the original value of s0 from the mscratch CSR, and
            // save it.
            //
            // This will also restore the kernel trap handler by writing zero
            // to the CSR. `csrrw` allows us to read and write the CSR in a
            // single instruction:
            csrrw s0, mscratch, zero // s0 <- mscratch[app s0] <- zero
            sw s0, 7*4(s1)           // Save app s0 to per-process state

            // -------------------------------------------------------------------
            // At this point, the entire app register file is saved. We also
            // restored the kernel trap handler. We have restored the following
            // kernel registers:
            //
            // - sp: kernel stack pointer
            // - s1: per-process stored state pointer
            //
            // We avoid clobbering those registers from this point onward.
            // -------------------------------------------------------------------

            // We also need to store some other information about the trap
            // reason, present in CSRs:
            //
            // - the app's PC (mepc),
            // - the trap reason (mcause),
            // - the trap 'value' (mtval, e.g., faulting address).
            //
            // We need to store mcause because we use that to determine why the
            // app stopped executing and returned to the kernel. We store mepc
            // because it is where we need to return to in the app at some
            // point. We need to store mtval in case the app faulted and we
            // need mtval to help with debugging.
            //
            // We use `s0` as a scratch register, as it fits into the 3-bit
            // register argument of RISC-V compressed loads / stores:

            // Save the PC to the stored state struct. We also load the address
            // of _return_to_kernel into mepc: this is where the mret
            // instruction that leaves the trap handler will jump.
            la s0, 300f        // Load _return_to_kernel into s0.
            csrrw s0, mepc, s0 // s0 <- mepc[app pc] <- _return_to_kernel
            sw s0, 31*4(s1)    // Store app's pc in stored state struct.

            // Save mtval to the stored state struct
            csrr s0, mtval
            sw s0, 33*4(s1)

            // Save mcause and leave it loaded into a0, as we call a function
            // with it below:
            csrr a0, mcause
            sw a0, 32*4(s1)

            // Depending on the value of a0, we might be calling into a
            // function while still in the trap handler. The callee may rely on
            // the `gp`, `tp`, and `fp` (s0) registers to be set correctly.
            // Thus we restore them here, as we need to do anyway. They are
            // saved registers, and so we avoid clobbering them beyond this
            // point.
            //
            // We do not restore `s1`, as we need to move it back into `a0`
            // _after_ potentially invoking the _disable_interrupt_... function.
            // LLVM relies on it to not be clobbered internally, but it is not
            // part of the RISC-V C ABI, which we need to follow here.
            //
            lw x8, 5*4(sp)   // fp/s0: Restore the frame pointer
            lw x4, 4*4(sp)   // tp: Restore the thread pointer
            lw x3, 3*4(sp)   // gp: Restore the global pointer

            // --------------------------------------------------------------------
            // From this point onward, avoid clobbering the following registers:
            //
            // - x2 / sp: kernel stack pointer
            // - x3 / gp: kernel global pointer
            // - x4 / tp: kernel thread pointer
            // - x8 / s0 / fp: kernel frame pointer
            // - x9 / s1: per-process stored state pointer
            //
            // --------------------------------------------------------------------

            // Now we need to check if this was an interrupt, and if it was,
            // then we need to disable the interrupt before returning from this
            // trap handler so that it does not fire again.
            //
            // If mcause is greater than or equal to zero this was not an
            // interrupt (i.e. the most significant bit is not 1). In this
            // case, jump to _start_app_trap_continue.
            bge a0, zero, 200f

            // This was an interrupt. Call the interrupt disable function, with
            // mcause already loaded in a0.
            //
            // This may clobber all caller-saved registers. However, at this
            // stage, we only restored `sp`, `s1`, and the registers above, all
            // of which are saved. Thus we don't have to worry about the
            // function call clobbering these registers.
            //
            // However, before we can execute any Rust code within a trap
            // handler context, we must set the hart-specific 'are we in a trap
            // handler' flag as an offset to the `_trap_handler_active` symbol.
            // The chip crate is responsible for defining this symbol, and
            // ensuring it is large enough to hold one MXLEN-bit word per hart
            // (indexed by mhartid).
            //
            // First, calculate its address and save it in a callee-saved
            // register. We use `s2`: the app's `s2` has already been saved,
            // and it's not one of the registers we've already restored:
            la s2, _trap_handler_active // s2 = addr(_trap_handler_active)
            csrr t0, mhartid            // t0 = hartid
            slli t0, t0, 2              // t0 = t0 * 4
            add s2, s2, t0              // s2 = addr(_trap_handler_active[hartid])

            // Indicate that we are in a trap handler on this hart:
            li t0, 1
            sw t0, 0(s2)

            jal ra, _disable_interrupt_trap_rust_from_app

            // Indicate that we are no longer going to be in a trap handler on
            // this hart:
            sw x0, 0(s2)

            200: // _start_app_trap_continue

            // Need to set mstatus.MPP to 0b11 so that we stay in machine mode.
            //
            // We use `a0` as a scratch register, as we are allowed to clobber
            // it here, and it fits into a compressed load instruction. We must
            // avoid using restored saved registers like `s0`, etc.
            //
            li a0, 0x1800    // Load 0b11 to the MPP bits location in a0
            csrs mstatus, a0 // mstatus |= a0

            // Use mret to exit the trap handler and return to the context
            // switching code. We loaded the address of _return_to_kernel into
            // mepc above.
            mret

            // This is where the trap handler jumps back to after the app stops
            // executing.
            300: // _return_to_kernel

            // We have already stored the app registers in the trap handler. We
            // have further restored `gp`, `tp`, `fp`/`s0` and the stack
            // pointer.
            //
            // The only other non-clobbered registers are `s1` and `a0`, where
            // `a0` needs to hold the per-process state pointer currently
            // stored in `s1`, and the original value of `s1` is saved on the
            // stack. Restore them:
            //
            mv a0, s1        // a0 = per-process stored state
            lw s1, 6*4(sp)   // restore s1 (used by LLVM internally)

            // We thus need to mark all registers as clobbered, except:
            //
            // - x2 (sp)
            // - x3 (gp)
            // - x4 (tp)
            // - x8 (fp)
            // - x9 (s1)
            // - x10 (a0)

            addi sp, sp, 8*4 // Reset kernel stack pointer
        ",

            in("x10") core::ptr::from_mut::<Riscv32iStoredState>(state),

            // Clobber all registers which the asm block may overwrite and
            // which are not saved and restored explicitly above.
            out("x1") _, out("x5") _, out("x6") _, out("x7") _, out("x11") _,
            out("x12") _, out("x13") _, out("x14") _, out("x15") _, out("x16") _,
            out("x17") _, out("x18") _, out("x19") _, out("x20") _, out("x21") _,
            out("x22") _, out("x23") _, out("x24") _, out("x25") _, out("x26") _,
            out("x27") _, out("x28") _, out("x29") _, out("x30") _, out("x31") _,
        );

        let ret = match mcause::Trap::from(state.mcause as usize) {
            mcause::Trap::Interrupt(_intr) => {
                // An interrupt occurred while the app was running.
                ContextSwitchReason::Interrupted
            }
            mcause::Trap::Exception(excp) => {
                match excp {
                    // The app made a system call.
                    mcause::Exception::UserEnvCall | mcause::Exception::MachineEnvCall => {
                        // Advance the PC past the 4-byte `ecall` instruction
                        // so the app resumes at the next instruction.
                        state.pc = state.pc.wrapping_add(4);

                        let syscall = kernel::syscall::Syscall::from_register_arguments(
                            state.regs[R_A4] as u8,
                            state.regs[R_A0] as usize,
                            (state.regs[R_A1] as usize).into(),
                            (state.regs[R_A2] as usize).into(),
                            (state.regs[R_A3] as usize).into(),
                        );

                        match syscall {
                            Some(s) => ContextSwitchReason::SyscallFired { syscall: s },
                            None => ContextSwitchReason::Fault,
                        }
                    }
                    // Any other exception means the app faulted.
                    _ => ContextSwitchReason::Fault,
                }
            }
        };
        let new_stack_pointer = state.regs[R_SP];
        (ret, Some(new_stack_pointer as *const u8))
    }

    unsafe fn print_context(
        &self,
        _accessible_memory_start: *const u8,
        _app_brk: *const u8,
        state: &Riscv32iStoredState,
        writer: &mut dyn Write,
    ) {
        let _ = writer.write_fmt(format_args!(
            "\
             \r\n R0 : {:#010X}    R16: {:#010X}\
             \r\n R1 : {:#010X}    R17: {:#010X}\
             \r\n R2 : {:#010X}    R18: {:#010X}\
             \r\n R3 : {:#010X}    R19: {:#010X}\
             \r\n R4 : {:#010X}    R20: {:#010X}\
             \r\n R5 : {:#010X}    R21: {:#010X}\
             \r\n R6 : {:#010X}    R22: {:#010X}\
             \r\n R7 : {:#010X}    R23: {:#010X}\
             \r\n R8 : {:#010X}    R24: {:#010X}\
             \r\n R9 : {:#010X}    R25: {:#010X}\
             \r\n R10: {:#010X}    R26: {:#010X}\
             \r\n R11: {:#010X}    R27: {:#010X}\
             \r\n R12: {:#010X}    R28: {:#010X}\
             \r\n R13: {:#010X}    R29: {:#010X}\
             \r\n R14: {:#010X}    R30: {:#010X}\
             \r\n R15: {:#010X}    R31: {:#010X}\
             \r\n PC : {:#010X}\
             \r\n\
             \r\n mcause: {:#010X} (",
            // x0 is hardwired to zero and not stored, so print a literal 0
            // for R0; `regs[i]` holds register x(i+1).
            0,
            state.regs[15],
            state.regs[0],
            state.regs[16],
            state.regs[1],
            state.regs[17],
            state.regs[2],
            state.regs[18],
            state.regs[3],
            state.regs[19],
            state.regs[4],
            state.regs[20],
            state.regs[5],
            state.regs[21],
            state.regs[6],
            state.regs[22],
            state.regs[7],
            state.regs[23],
            state.regs[8],
            state.regs[24],
            state.regs[9],
            state.regs[25],
            state.regs[10],
            state.regs[26],
            state.regs[11],
            state.regs[27],
            state.regs[12],
            state.regs[28],
            state.regs[13],
            state.regs[29],
            state.regs[14],
            state.regs[30],
            state.pc,
            state.mcause,
        ));
        crate::print_mcause(mcause::Trap::from(state.mcause as usize), writer);
        let _ = writer.write_fmt(format_args!(
            ")\
             \r\n mtval: {:#010X}\
             \r\n\r\n",
            state.mtval,
        ));
    }

    fn store_context(
        &self,
        state: &Riscv32iStoredState,
        out: &mut [u8],
    ) -> Result<usize, ErrorCode> {
        if out.len() >= size_of::<Riscv32iStoredState>() + METADATA_LEN * U32_SZ {
            write_u32_to_u8_slice(VERSION, out, VERSION_IDX);
            write_u32_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX);
            write_u32_to_u8_slice(u32::from_le_bytes(TAG), out, TAG_IDX);
            write_u32_to_u8_slice(state.pc, out, PC_IDX);
            write_u32_to_u8_slice(state.mcause, out, MCAUSE_IDX);
            write_u32_to_u8_slice(state.mtval, out, MTVAL_IDX);
            for (i, v) in state.regs.iter().enumerate() {
                write_u32_to_u8_slice(*v, out, REGS_IDX + i);
            }
            // 31 registers plus pc, mcause, and mtval, plus the metadata
            // words (version, size, tag).
            Ok((state.regs.len() + 3 + METADATA_LEN) * U32_SZ)
        } else {
            Err(ErrorCode::SIZE)
        }
    }
}
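
// What follows is a minimal sketch of a serialization round-trip check,
// assuming this crate can be built with a host-side test harness; the
// field values below are illustrative only. It exercises `store_context`
// together with the `TryFrom<&[u8]>` implementation above.
#[cfg(test)]
mod tests {
    use super::*;
    use kernel::syscall::UserspaceKernelBoundary;

    #[test]
    fn stored_state_round_trip() {
        let sys = unsafe { SysCall::new() };
        let mut state = Riscv32iStoredState::default();
        state.pc = 0x2000_1234;
        state.mcause = 8; // hypothetical UserEnvCall value
        state.regs[R_SP] = 0x8000_0000;

        // 37 little-endian words: 3 metadata words, pc, mcause, mtval, and
        // the 31 registers.
        let mut buf = [0_u8; (31 + 3 + METADATA_LEN) * U32_SZ];
        let written = sys.store_context(&state, &mut buf).unwrap();
        assert_eq!(written, buf.len());

        let restored = Riscv32iStoredState::try_from(&buf[..]).unwrap();
        assert_eq!(restored.pc, state.pc);
        assert_eq!(restored.mcause, state.mcause);
        assert_eq!(restored.regs, state.regs);
    }
}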