1use core::fmt::Write;
11use core::marker::PhantomData;
12use core::mem::{self, size_of};
13use core::ops::Range;
14use core::ptr::{self, addr_of, addr_of_mut, read_volatile, write_volatile};
15use kernel::errorcode::ErrorCode;
16
17use crate::CortexMVariant;
18
/// Flag indicating a process made a system call; read and cleared (via
/// volatile accesses) in `switch_to_process()` to classify why control
/// returned to the kernel. The writer is not visible in this file —
/// presumably the architecture's SVC handler in assembly; `#[no_mangle]`
/// and `#[used]` keep the symbol available to such external code.
#[no_mangle]
#[used]
pub static mut SYSCALL_FIRED: usize = 0;
25
/// Flag indicating the process hard-faulted; read and cleared (via volatile
/// accesses) in `switch_to_process()`, where a value of 1 maps the switch
/// reason to `ContextSwitchReason::Fault`. Set outside this file —
/// presumably by the fault handler in assembly; `#[no_mangle]`/`#[used]`
/// keep the symbol visible to that code.
#[no_mangle]
#[used]
pub static mut APP_HARD_FAULT: usize = 0;
35
/// Buffer for five System Control Block register values. Never read or
/// written in this file; exported (`#[no_mangle]`/`#[used]`) so external
/// code — presumably the assembly fault handler — can capture fault state
/// into it. NOTE(review): confirm the writer and which SCB registers these
/// are against the assembly side.
#[no_mangle]
#[used]
pub static mut SCB_REGISTERS: [u32; 5] = [0; 5];
44
/// Per-process CPU state the kernel preserves across context switches.
#[derive(Default)]
pub struct CortexMStoredState {
    // Callee-saved registers r4-r11 in order (regs[0] = r4 … regs[7] = r11;
    // this mapping is fixed by the format arguments in `print_context()`).
    // Saved/restored by `A::switch_to_user`.
    regs: [usize; 8],
    // PC at which the process yielded, captured from the exception frame in
    // `switch_to_process()`; reused as the return address by
    // `set_process_function()`.
    yield_pc: usize,
    // Saved xPSR from the exception frame.
    psr: usize,
    // Process stack pointer (PSP), pointing at the exception frame.
    psp: usize,
}
54
// Size in bytes of the exception frame pushed on the process stack:
// 8 words (r0-r3, r12, lr, pc, xpsr) x 4 bytes — the word layout is what
// `set_process_function()` and `switch_to_process()` index into.
const SVC_FRAME_SIZE: usize = 32;

// Version word of the serialized `CortexMStoredState` layout produced by
// `store_context()` and validated by `TryFrom<&[u8]>`.
const VERSION: usize = 1;
// Size in bytes of the state payload; stored and checked as a sanity word.
const STORED_STATE_SIZE: usize = size_of::<CortexMStoredState>();
// Magic tag ("ctxm") identifying a Cortex-M serialized state.
const TAG: [u8; 4] = [b'c', b't', b'x', b'm'];
// Number of metadata words (version, size, tag) preceding the payload.
const METADATA_LEN: usize = 3;

// Word indices into the serialized buffer (each slot is one usize,
// little-endian — see `usize_byte_range`).
const VERSION_IDX: usize = 0;
const SIZE_IDX: usize = 1;
const TAG_IDX: usize = 2;
const YIELDPC_IDX: usize = 3;
const PSR_IDX: usize = 4;
const PSP_IDX: usize = 5;
const REGS_IDX: usize = 6;
// Word indices holding the eight saved registers.
const REGS_RANGE: Range<usize> = REGS_IDX..REGS_IDX + 8;

// Bytes per serialized word.
const USIZE_SZ: usize = size_of::<usize>();
74
75fn usize_byte_range(index: usize) -> Range<usize> {
76 index * USIZE_SZ..(index + 1) * USIZE_SZ
77}
78
79fn usize_from_u8_slice(slice: &[u8], index: usize) -> Result<usize, ErrorCode> {
80 let range = usize_byte_range(index);
81 Ok(usize::from_le_bytes(
82 slice
83 .get(range)
84 .ok_or(ErrorCode::SIZE)?
85 .try_into()
86 .or(Err(ErrorCode::FAIL))?,
87 ))
88}
89
90fn write_usize_to_u8_slice(val: usize, slice: &mut [u8], index: usize) {
91 let range = usize_byte_range(index);
92 slice[range].copy_from_slice(&val.to_le_bytes());
93}
94
95impl core::convert::TryFrom<&[u8]> for CortexMStoredState {
96 type Error = ErrorCode;
97 fn try_from(ss: &[u8]) -> Result<CortexMStoredState, Self::Error> {
98 if ss.len() == size_of::<CortexMStoredState>() + METADATA_LEN * USIZE_SZ
99 && usize_from_u8_slice(ss, VERSION_IDX)? == VERSION
100 && usize_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE
101 && usize_from_u8_slice(ss, TAG_IDX)? == u32::from_le_bytes(TAG) as usize
102 {
103 let mut res = CortexMStoredState {
104 regs: [0; 8],
105 yield_pc: usize_from_u8_slice(ss, YIELDPC_IDX)?,
106 psr: usize_from_u8_slice(ss, PSR_IDX)?,
107 psp: usize_from_u8_slice(ss, PSP_IDX)?,
108 };
109 for (i, v) in (REGS_RANGE).enumerate() {
110 res.regs[i] = usize_from_u8_slice(ss, v)?;
111 }
112 Ok(res)
113 } else {
114 Err(ErrorCode::FAIL)
115 }
116 }
117}
118
/// Implementation of the userspace/kernel boundary for a Cortex-M variant.
/// Zero-sized: the `PhantomData` only ties the type to its `CortexMVariant`,
/// which supplies the actual context-switch routine (`A::switch_to_user`).
pub struct SysCall<A: CortexMVariant>(PhantomData<A>);

impl<A: CortexMVariant> SysCall<A> {
    /// Construct the boundary object.
    ///
    /// # Safety
    /// The body performs no unsafe operations; the `unsafe` marker is part
    /// of the constructor's contract — presumably to restrict who creates
    /// the kernel boundary. TODO confirm against the trait's documentation.
    pub const unsafe fn new() -> SysCall<A> {
        SysCall(PhantomData)
    }
}
129
impl<A: CortexMVariant> kernel::syscall::UserspaceKernelBoundary for SysCall<A> {
    type StoredState = CortexMStoredState;

    fn initial_process_app_brk_size(&self) -> usize {
        // A new process needs room for at least one exception frame on its
        // stack before it can be started.
        SVC_FRAME_SIZE
    }

    /// Reset `state` and reserve the initial exception frame just below the
    /// app break (the stack grows downward from `app_brk`).
    ///
    /// Returns `Err(())` if the accessible region cannot hold one frame.
    unsafe fn initialize_process(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut Self::StoredState,
    ) -> Result<(), ()> {
        // Zero the callee-saved register snapshot.
        state.regs.iter_mut().for_each(|x| *x = 0);
        state.yield_pc = 0;
        // Initial xPSR: only the Thumb bit set — Cortex-M executes Thumb only.
        state.psr = 0x01000000;
        // Stack pointer starts at the top of accessible memory.
        state.psp = app_brk as usize;
        // Assumes app_brk >= accessible_memory_start; a wrapping subtraction
        // here would indicate a caller bug.
        if (app_brk as usize - accessible_memory_start as usize) < SVC_FRAME_SIZE {
            return Err(());
        }

        // Carve out space for the first exception frame.
        state.psp -= SVC_FRAME_SIZE;
        Ok(())
    }

    /// Write a syscall return value into the r0-r3 slots of the exception
    /// frame at the process stack pointer, so the process sees the values in
    /// registers when it resumes.
    unsafe fn set_syscall_return_value(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut Self::StoredState,
        return_value: kernel::syscall::SyscallReturn,
    ) -> Result<(), ()> {
        // Refuse to write through a stack pointer outside the process's
        // accessible memory; only the four r0-r3 words are touched, so only
        // that much headroom is required. saturating_add guards against
        // wraparound near usize::MAX.
        if state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(mem::size_of::<u32>() * 4) > app_brk as usize
        {
            return Err(());
        }

        // The bottom four words of the frame are r0-r3.
        let sp = state.psp as *mut u32;
        let (r0, r1, r2, r3) = (sp.offset(0), sp.offset(1), sp.offset(2), sp.offset(3));

        // Encode the return value into the four registers per the TRD104
        // syscall ABI.
        kernel::utilities::arch_helpers::encode_syscall_return_trd104(
            &kernel::utilities::arch_helpers::TRD104SyscallReturn::from_syscall_return(
                return_value,
            ),
            &mut *r0,
            &mut *r1,
            &mut *r2,
            &mut *r3,
        );

        Ok(())
    }

    /// Rewrite the exception frame at the process stack pointer so the
    /// process resumes in `callback`, with lr pointing back at the saved
    /// yield PC and the callback's arguments in r0-r3.
    unsafe fn set_process_function(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut CortexMStoredState,
        callback: kernel::process::FunctionCall,
    ) -> Result<(), ()> {
        // The whole frame must lie within accessible memory.
        if state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize
        {
            return Err(());
        }

        // Frame word layout: [0..=3] = r0-r3, [4] = r12 (left unchanged),
        // [5] = lr, [6] = pc, [7] = xpsr.
        let stack_bottom = state.psp as *mut usize;
        ptr::write(stack_bottom.offset(7), state.psr);
        // `| 1` sets the Thumb bit on the jump/return addresses.
        ptr::write(stack_bottom.offset(6), callback.pc.addr() | 1);
        ptr::write(stack_bottom.offset(5), state.yield_pc | 1);
        // Callback arguments land in r3 down to r0.
        ptr::write(stack_bottom.offset(3), callback.argument3.as_usize());
        ptr::write(stack_bottom.offset(2), callback.argument2);
        ptr::write(stack_bottom.offset(1), callback.argument1);
        ptr::write(stack_bottom.offset(0), callback.argument0);
        Ok(())
    }

    /// Run the process until it traps back into the kernel, then classify
    /// why: hard fault, syscall, or interrupt. Returns the reason plus the
    /// process's new stack pointer.
    unsafe fn switch_to_process(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut CortexMStoredState,
    ) -> (kernel::syscall::ContextSwitchReason, Option<*const u8>) {
        // Variant-specific context switch; returns the PSP after the process
        // trapped back into the kernel.
        let new_stack_pointer = A::switch_to_user(state.psp as *const usize, &mut state.regs);

        state.psp = new_stack_pointer as usize;

        // A stack pointer outside accessible memory means the frame reads
        // below would be unsound — treat it as a fault instead.
        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;

        // Read-then-clear the handler-set flags. Volatile accesses because
        // they are written outside Rust's view (presumably by the assembly
        // fault/SVC handlers).
        let app_fault = read_volatile(&*addr_of!(APP_HARD_FAULT));
        write_volatile(&mut *addr_of_mut!(APP_HARD_FAULT), 0);

        let syscall_fired = read_volatile(&*addr_of!(SYSCALL_FIRED));
        write_volatile(&mut *addr_of_mut!(SYSCALL_FIRED), 0);

        let switch_reason = if app_fault == 1 || invalid_stack_pointer {
            kernel::syscall::ContextSwitchReason::Fault
        } else if syscall_fired == 1 {
            // Save the resume point (pc, xpsr) from the exception frame.
            state.yield_pc = ptr::read(new_stack_pointer.offset(6));
            state.psr = ptr::read(new_stack_pointer.offset(7));

            // Syscall arguments as pushed in r0-r3.
            let r0 = ptr::read(new_stack_pointer.offset(0));
            let r1 = ptr::read(new_stack_pointer.offset(1));
            let r2 = ptr::read(new_stack_pointer.offset(2));
            let r3 = ptr::read(new_stack_pointer.offset(3));

            // Recover the syscall number: the stacked PC points just past
            // the 16-bit `svc` instruction, whose low byte is the immediate.
            let pcptr = ptr::read((new_stack_pointer as *const *const u16).offset(6));
            let svc_instr = ptr::read(pcptr.offset(-1));
            let svc_num = (svc_instr & 0xff) as u8;

            let syscall = kernel::syscall::Syscall::from_register_arguments(
                svc_num,
                r0,
                r1.into(),
                r2.into(),
                r3.into(),
            );

            match syscall {
                Some(s) => kernel::syscall::ContextSwitchReason::SyscallFired { syscall: s },
                // Unrecognized syscall class/number is treated as a fault.
                None => kernel::syscall::ContextSwitchReason::Fault,
            }
        } else {
            // Neither fault nor syscall: the process was interrupted.
            kernel::syscall::ContextSwitchReason::Interrupted
        };

        (switch_reason, Some(new_stack_pointer as *const u8))
    }

    /// Dump the process's registers and PSR flags for debugging. Frame
    /// values come from the stacked exception frame; callee-saved registers
    /// come from `state.regs`.
    unsafe fn print_context(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &CortexMStoredState,
        writer: &mut dyn Write,
    ) {
        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;

        let stack_pointer = state.psp as *const usize;

        // If the stack pointer is out of bounds, print recognizable sentinel
        // values instead of dereferencing it.
        let (r0, r1, r2, r3, r12, lr, pc, xpsr) = if invalid_stack_pointer {
            (
                0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD,
                0xBAD00BAD,
            )
        } else {
            // Stacked frame: r0-r3, r12, lr, pc, xpsr.
            let r0 = ptr::read(stack_pointer.offset(0));
            let r1 = ptr::read(stack_pointer.offset(1));
            let r2 = ptr::read(stack_pointer.offset(2));
            let r3 = ptr::read(stack_pointer.offset(3));
            let r12 = ptr::read(stack_pointer.offset(4));
            let lr = ptr::read(stack_pointer.offset(5));
            let pc = ptr::read(stack_pointer.offset(6));
            let xpsr = ptr::read(stack_pointer.offset(7));
            (r0, r1, r2, r3, r12, lr, pc, xpsr)
        };

        // Argument order fixes the regs[] mapping: regs[0]=r4 … regs[7]=r11.
        let _ = writer.write_fmt(format_args!(
            "\
             \r\n R0 : {:#010X}    R6 : {:#010X}\
             \r\n R1 : {:#010X}    R7 : {:#010X}\
             \r\n R2 : {:#010X}    R8 : {:#010X}\
             \r\n R3 : {:#010X}    R10: {:#010X}\
             \r\n R4 : {:#010X}    R11: {:#010X}\
             \r\n R5 : {:#010X}    R12: {:#010X}\
             \r\n R9 : {:#010X} (Static Base Register)\
             \r\n SP : {:#010X} (Process Stack Pointer)\
             \r\n LR : {:#010X}\
             \r\n PC : {:#010X}\
             \r\n YPC : {:#010X}\
             \r\n",
            r0,
            state.regs[2],
            r1,
            state.regs[3],
            r2,
            state.regs[4],
            r3,
            state.regs[6],
            state.regs[0],
            state.regs[7],
            state.regs[1],
            r12,
            state.regs[5],
            stack_pointer as usize,
            lr,
            pc,
            state.yield_pc,
        ));
        // APSR condition flags and GE bits extracted from the saved xPSR.
        let _ = writer.write_fmt(format_args!(
            "\
             \r\n APSR: N {} Z {} C {} V {} Q {}\
             \r\n       GE {} {} {} {}",
            (xpsr >> 31) & 0x1,
            (xpsr >> 30) & 0x1,
            (xpsr >> 29) & 0x1,
            (xpsr >> 28) & 0x1,
            (xpsr >> 27) & 0x1,
            (xpsr >> 19) & 0x1,
            (xpsr >> 18) & 0x1,
            (xpsr >> 17) & 0x1,
            (xpsr >> 16) & 0x1,
        ));
        // EPSR: interruptible-continuable instruction / IT state, and the
        // Thumb bit (must be 1 on Cortex-M).
        let ici_it = (((xpsr >> 25) & 0x3) << 6) | ((xpsr >> 10) & 0x3f);
        let thumb_bit = ((xpsr >> 24) & 0x1) == 1;
        let _ = writer.write_fmt(format_args!(
            "\
             \r\n EPSR: ICI.IT {:#04x}\
             \r\n       ThumbBit {} {}\r\n",
            ici_it,
            thumb_bit,
            if thumb_bit {
                ""
            } else {
                "!!ERROR - Cortex M Thumb only!"
            },
        ));
    }

    /// Serialize `state` into `out` as little-endian words: metadata
    /// (version, size, tag) followed by yield_pc, psr, psp, and regs[0..8].
    /// Returns the number of bytes written, or `ErrorCode::SIZE` if `out`
    /// is too small. The inverse of `TryFrom<&[u8]>` above.
    fn store_context(
        &self,
        state: &CortexMStoredState,
        out: &mut [u8],
    ) -> Result<usize, ErrorCode> {
        // NOTE(review): the literal `3` here is METADATA_LEN; using the
        // constant would make the length check self-documenting.
        if out.len() >= size_of::<CortexMStoredState>() + 3 * USIZE_SZ {
            write_usize_to_u8_slice(VERSION, out, VERSION_IDX);
            write_usize_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX);
            write_usize_to_u8_slice(u32::from_le_bytes(TAG) as usize, out, TAG_IDX);
            write_usize_to_u8_slice(state.yield_pc, out, YIELDPC_IDX);
            write_usize_to_u8_slice(state.psr, out, PSR_IDX);
            write_usize_to_u8_slice(state.psp, out, PSP_IDX);
            for (i, v) in state.regs.iter().enumerate() {
                write_usize_to_u8_slice(*v, out, REGS_IDX + i);
            }
            // Bytes written: 8 regs + 3 payload words + 3 metadata words.
            Ok((state.regs.len() + 3 + METADATA_LEN) * USIZE_SZ)
        } else {
            Err(ErrorCode::SIZE)
        }
    }
}