1use core::fmt::Write;
9use core::marker::PhantomData;
10use core::mem::{self, size_of};
11use core::ops::Range;
12use core::ptr::{self, addr_of, addr_of_mut, read_volatile, write_volatile};
13use kernel::errorcode::ErrorCode;
14
15use crate::CortexMVariant;
16
/// Flag set to nonzero while a process runs if it executed an `svc`
/// (system call); read and then reset to 0 in `switch_to_process`.
///
/// NOTE(review): the writer is not visible in this file — presumably the
/// arch-specific SVC handler assembly. `#[no_mangle]` + `#[used]` keep the
/// symbol visible to that external code; confirm against the assembly
/// sources.
#[no_mangle]
#[used]
pub static mut SYSCALL_FIRED: usize = 0;
23
/// Flag set to nonzero while a process runs if it triggered a hard fault;
/// read and then reset to 0 in `switch_to_process`, where a value of 1 is
/// reported as `ContextSwitchReason::Fault`.
///
/// NOTE(review): the writer is not visible in this file — presumably the
/// arch-specific fault handler assembly. `#[no_mangle]` + `#[used]` keep
/// the symbol visible to that external code; confirm against the assembly
/// sources.
#[no_mangle]
#[used]
pub static mut APP_HARD_FAULT: usize = 0;
33
/// Storage for five SCB (System Control Block) register values.
///
/// NOTE(review): nothing in this file reads or writes this array —
/// presumably the hard-fault handler assembly dumps SCB state here for
/// later debugging output; the exported, `#[used]` symbol exists for that
/// external writer. Confirm against the assembly sources.
#[no_mangle]
#[used]
pub static mut SCB_REGISTERS: [u32; 5] = [0; 5];
42
/// Size in bytes of the hardware-stacked exception frame: eight 32-bit
/// words (r0-r3, r12, lr, pc, xPSR), matching the frame offsets 0..=7 read
/// and written throughout this file.
const SVC_FRAME_SIZE: usize = 32;
45
/// Per-process state preserved across context switches, plus the values
/// needed to resume the process after it yields.
#[derive(Default)]
pub struct CortexMStoredState {
    // Callee-saved registers r4-r11 (regs[0] = r4 ... regs[7] = r11; see
    // the register labels printed in `print_context`).
    regs: [usize; 8],
    // PC at which the process last trapped into the kernel (captured from
    // the stacked frame in `switch_to_process`).
    yield_pc: usize,
    // xPSR captured from the stacked frame when the process last trapped.
    psr: usize,
    // Process stack pointer.
    psp: usize,
}
55
/// Layout version of the serialized state produced by `store_context`.
const VERSION: usize = 1;
/// Serialized size of the state payload itself (excludes metadata words).
const STORED_STATE_SIZE: usize = size_of::<CortexMStoredState>();
/// Magic tag ("ctxm") identifying a serialized Cortex-M stored state.
const TAG: [u8; 4] = [b'c', b't', b'x', b'm'];
/// Number of metadata words (version, size, tag) preceding the payload.
const METADATA_LEN: usize = 3;

// usize-slot index of each field within the serialized buffer.
const VERSION_IDX: usize = 0;
const SIZE_IDX: usize = 1;
const TAG_IDX: usize = 2;
const YIELDPC_IDX: usize = 3;
const PSR_IDX: usize = 4;
const PSP_IDX: usize = 5;
const REGS_IDX: usize = 6;
// Slots holding the eight saved registers.
const REGS_RANGE: Range<usize> = REGS_IDX..REGS_IDX + 8;
70
/// Number of bytes in a `usize` on this target.
const USIZE_SZ: usize = size_of::<usize>();

/// Byte range covering the `index`-th `usize`-sized slot of a flat buffer.
fn usize_byte_range(index: usize) -> Range<usize> {
    let start = index * USIZE_SZ;
    start..start + USIZE_SZ
}
75
76fn usize_from_u8_slice(slice: &[u8], index: usize) -> Result<usize, ErrorCode> {
77 let range = usize_byte_range(index);
78 Ok(usize::from_le_bytes(
79 slice
80 .get(range)
81 .ok_or(ErrorCode::SIZE)?
82 .try_into()
83 .or(Err(ErrorCode::FAIL))?,
84 ))
85}
86
/// Serialize `val` little-endian into the `index`-th `usize`-sized slot of
/// `slice`.
///
/// Panics if the slot lies outside `slice`; callers validate the buffer
/// length before writing.
fn write_usize_to_u8_slice(val: usize, slice: &mut [u8], index: usize) {
    let start = index * size_of::<usize>();
    let end = start + size_of::<usize>();
    slice[start..end].copy_from_slice(&val.to_le_bytes());
}
91
92impl core::convert::TryFrom<&[u8]> for CortexMStoredState {
93 type Error = ErrorCode;
94 fn try_from(ss: &[u8]) -> Result<CortexMStoredState, Self::Error> {
95 if ss.len() == size_of::<CortexMStoredState>() + METADATA_LEN * USIZE_SZ
96 && usize_from_u8_slice(ss, VERSION_IDX)? == VERSION
97 && usize_from_u8_slice(ss, SIZE_IDX)? == STORED_STATE_SIZE
98 && usize_from_u8_slice(ss, TAG_IDX)? == u32::from_le_bytes(TAG) as usize
99 {
100 let mut res = CortexMStoredState {
101 regs: [0; 8],
102 yield_pc: usize_from_u8_slice(ss, YIELDPC_IDX)?,
103 psr: usize_from_u8_slice(ss, PSR_IDX)?,
104 psp: usize_from_u8_slice(ss, PSP_IDX)?,
105 };
106 for (i, v) in (REGS_RANGE).enumerate() {
107 res.regs[i] = usize_from_u8_slice(ss, v)?;
108 }
109 Ok(res)
110 } else {
111 Err(ErrorCode::FAIL)
112 }
113 }
114}
115
/// Implementation of the userspace/kernel boundary for Cortex-M targets.
/// Zero-sized: the variant type `A` supplies the arch-specific
/// context-switch entry point (`A::switch_to_user`).
pub struct SysCall<A: CortexMVariant>(PhantomData<A>);

impl<A: CortexMVariant> SysCall<A> {
    /// Construct the boundary handler.
    ///
    /// # Safety
    /// NOTE(review): marked `unsafe` by the original author; the invariant
    /// it guards (presumably that only trusted boot code instantiates the
    /// boundary) is not visible in this file — confirm before relying on it.
    pub const unsafe fn new() -> SysCall<A> {
        SysCall(PhantomData)
    }
}
125
// Kernel-facing boundary implementation: starting processes, delivering
// syscall return values, fabricating upcall frames, context switching, and
// debug/serialization support.
impl<A: CortexMVariant> kernel::syscall::UserspaceKernelBoundary for SysCall<A> {
    type StoredState = CortexMStoredState;

    fn initial_process_app_brk_size(&self) -> usize {
        // A process needs room for one full hardware exception frame on its
        // stack before it can be started.
        SVC_FRAME_SIZE
    }

    unsafe fn initialize_process(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut Self::StoredState,
    ) -> Result<(), ()> {
        // Start from a clean register file and no saved yield point.
        state.regs.iter_mut().for_each(|x| *x = 0);
        state.yield_pc = 0;
        // xPSR with only the Thumb bit (bit 24) set.
        state.psr = 0x01000000;
        state.psp = app_brk as usize;
        // NOTE(review): if `app_brk < accessible_memory_start` this
        // subtraction wraps in release builds (panics in debug); presumably
        // callers guarantee app_brk >= accessible_memory_start — confirm.
        if (app_brk as usize - accessible_memory_start as usize) < SVC_FRAME_SIZE {
            return Err(());
        }

        // Reserve space for the initial exception frame below the brk.
        state.psp -= SVC_FRAME_SIZE;
        Ok(())
    }

    unsafe fn set_syscall_return_value(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut Self::StoredState,
        return_value: kernel::syscall::SyscallReturn,
    ) -> Result<(), ()> {
        // The four return registers live in the exception frame at the top
        // of the process stack; refuse to write outside [start, app_brk).
        if state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(mem::size_of::<u32>() * 4) > app_brk as usize
        {
            return Err(());
        }

        // r0-r3 slots of the stacked exception frame.
        let sp = state.psp as *mut u32;
        let (r0, r1, r2, r3) = (sp.offset(0), sp.offset(1), sp.offset(2), sp.offset(3));

        // Encode the kernel's return value into r0-r3 per the TRD104 ABI
        // helper; the process sees these when it resumes.
        kernel::utilities::arch_helpers::encode_syscall_return_trd104(
            &kernel::utilities::arch_helpers::TRD104SyscallReturn::from_syscall_return(
                return_value,
            ),
            &mut *r0,
            &mut *r1,
            &mut *r2,
            &mut *r3,
        );

        Ok(())
    }

    unsafe fn set_process_function(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut CortexMStoredState,
        callback: kernel::process::FunctionCall,
    ) -> Result<(), ()> {
        // The frame we are about to fabricate must fit inside the
        // process-accessible region.
        if state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize
        {
            return Err(());
        }

        // Fabricate an exception frame so that returning to userspace jumps
        // into `callback`:
        //   slot 7: xPSR, slot 6: PC (Thumb bit forced), slot 5: LR set to
        //   the saved yield point (Thumb bit forced), slots 0-3: r0-r3
        //   carrying the four callback arguments. Slot 4 (r12) is left
        //   untouched.
        let stack_bottom = state.psp as *mut usize;
        ptr::write(stack_bottom.offset(7), state.psr);
        ptr::write(stack_bottom.offset(6), callback.pc.addr() | 1);
        ptr::write(stack_bottom.offset(5), state.yield_pc | 1);
        ptr::write(stack_bottom.offset(3), callback.argument3.as_usize());
        ptr::write(stack_bottom.offset(2), callback.argument2);
        ptr::write(stack_bottom.offset(1), callback.argument1);
        ptr::write(stack_bottom.offset(0), callback.argument0);
        Ok(())
    }

    unsafe fn switch_to_process(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &mut CortexMStoredState,
    ) -> (kernel::syscall::ContextSwitchReason, Option<*const u8>) {
        // Run the process until it syscalls, faults, or is interrupted; the
        // variant's assembly returns the new process stack pointer.
        let new_stack_pointer = A::switch_to_user(state.psp as *const usize, &mut state.regs);

        state.psp = new_stack_pointer as usize;

        // A stack pointer that no longer leaves room for the stacked
        // exception frame inside the accessible region is treated as a fault.
        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;

        // Read-then-clear the flags set while the process ran. Volatile:
        // these statics are written outside Rust's view (handler code).
        let app_fault = read_volatile(&*addr_of!(APP_HARD_FAULT));
        write_volatile(&mut *addr_of_mut!(APP_HARD_FAULT), 0);

        let syscall_fired = read_volatile(&*addr_of!(SYSCALL_FIRED));
        write_volatile(&mut *addr_of_mut!(SYSCALL_FIRED), 0);

        let switch_reason = if app_fault == 1 || invalid_stack_pointer {
            kernel::syscall::ContextSwitchReason::Fault
        } else if syscall_fired == 1 {
            // Record where to resume the process, from the stacked PC/xPSR.
            state.yield_pc = ptr::read(new_stack_pointer.offset(6));
            state.psr = ptr::read(new_stack_pointer.offset(7));

            // Syscall arguments come from the stacked r0-r3.
            let r0 = ptr::read(new_stack_pointer.offset(0));
            let r1 = ptr::read(new_stack_pointer.offset(1));
            let r2 = ptr::read(new_stack_pointer.offset(2));
            let r3 = ptr::read(new_stack_pointer.offset(3));

            // The `svc` instruction is the 2-byte Thumb instruction just
            // before the stacked PC; its low byte is the syscall class.
            let pcptr = ptr::read((new_stack_pointer as *const *const u16).offset(6));
            let svc_instr = ptr::read(pcptr.offset(-1));
            let svc_num = (svc_instr & 0xff) as u8;

            let syscall = kernel::syscall::Syscall::from_register_arguments(
                svc_num,
                r0,
                r1.into(),
                r2.into(),
                r3.into(),
            );

            match syscall {
                Some(s) => kernel::syscall::ContextSwitchReason::SyscallFired { syscall: s },
                // An unparseable syscall encoding is treated as a fault.
                None => kernel::syscall::ContextSwitchReason::Fault,
            }
        } else {
            kernel::syscall::ContextSwitchReason::Interrupted
        };

        (switch_reason, Some(new_stack_pointer as *const u8))
    }

    unsafe fn print_context(
        &self,
        accessible_memory_start: *const u8,
        app_brk: *const u8,
        state: &CortexMStoredState,
        writer: &mut dyn Write,
    ) {
        // Same bounds check as elsewhere: only read the frame when the
        // stack pointer is plausibly valid.
        let invalid_stack_pointer = state.psp < accessible_memory_start as usize
            || state.psp.saturating_add(SVC_FRAME_SIZE) > app_brk as usize;

        let stack_pointer = state.psp as *const usize;

        // If the frame cannot be read safely, print sentinel values.
        let (r0, r1, r2, r3, r12, lr, pc, xpsr) = if invalid_stack_pointer {
            (
                0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD, 0xBAD00BAD,
                0xBAD00BAD,
            )
        } else {
            let r0 = ptr::read(stack_pointer.offset(0));
            let r1 = ptr::read(stack_pointer.offset(1));
            let r2 = ptr::read(stack_pointer.offset(2));
            let r3 = ptr::read(stack_pointer.offset(3));
            let r12 = ptr::read(stack_pointer.offset(4));
            let lr = ptr::read(stack_pointer.offset(5));
            let pc = ptr::read(stack_pointer.offset(6));
            let xpsr = ptr::read(stack_pointer.offset(7));
            (r0, r1, r2, r3, r12, lr, pc, xpsr)
        };

        // Caller-saved values come from the stacked frame; callee-saved
        // (r4-r11) come from `state.regs` (regs[0] = r4 ... regs[7] = r11).
        let _ = writer.write_fmt(format_args!(
            "\
            \r\n R0 : {:#010X}    R6 : {:#010X}\
            \r\n R1 : {:#010X}    R7 : {:#010X}\
            \r\n R2 : {:#010X}    R8 : {:#010X}\
            \r\n R3 : {:#010X}    R10: {:#010X}\
            \r\n R4 : {:#010X}    R11: {:#010X}\
            \r\n R5 : {:#010X}    R12: {:#010X}\
            \r\n R9 : {:#010X} (Static Base Register)\
            \r\n SP : {:#010X} (Process Stack Pointer)\
            \r\n LR : {:#010X}\
            \r\n PC : {:#010X}\
            \r\n YPC : {:#010X}\
            \r\n",
            r0,
            state.regs[2],
            r1,
            state.regs[3],
            r2,
            state.regs[4],
            r3,
            state.regs[6],
            state.regs[0],
            state.regs[7],
            state.regs[1],
            r12,
            state.regs[5],
            stack_pointer as usize,
            lr,
            pc,
            state.yield_pc,
        ));
        // Decode the APSR condition flags and GE bits out of xPSR.
        let _ = writer.write_fmt(format_args!(
            "\
            \r\n APSR: N {} Z {} C {} V {} Q {}\
            \r\n       GE {} {} {} {}",
            (xpsr >> 31) & 0x1,
            (xpsr >> 30) & 0x1,
            (xpsr >> 29) & 0x1,
            (xpsr >> 28) & 0x1,
            (xpsr >> 27) & 0x1,
            (xpsr >> 19) & 0x1,
            (xpsr >> 18) & 0x1,
            (xpsr >> 17) & 0x1,
            (xpsr >> 16) & 0x1,
        ));
        // EPSR: reassemble the split ICI/IT bits and check the Thumb bit,
        // which must be 1 on Cortex-M.
        let ici_it = (((xpsr >> 25) & 0x3) << 6) | ((xpsr >> 10) & 0x3f);
        let thumb_bit = ((xpsr >> 24) & 0x1) == 1;
        let _ = writer.write_fmt(format_args!(
            "\
            \r\n EPSR: ICI.IT {:#04x}\
            \r\n       ThumbBit {} {}\r\n",
            ici_it,
            thumb_bit,
            if thumb_bit {
                ""
            } else {
                "!!ERROR - Cortex M Thumb only!"
            },
        ));
    }

    fn store_context(
        &self,
        state: &CortexMStoredState,
        out: &mut [u8],
    ) -> Result<usize, ErrorCode> {
        // Serialize metadata header (version, size, tag) followed by the
        // state fields, each in its fixed usize slot.
        // NOTE(review): the literal `3` below is METADATA_LEN; consider the
        // named constant for consistency with `try_from`'s length check.
        if out.len() >= size_of::<CortexMStoredState>() + 3 * USIZE_SZ {
            write_usize_to_u8_slice(VERSION, out, VERSION_IDX);
            write_usize_to_u8_slice(STORED_STATE_SIZE, out, SIZE_IDX);
            write_usize_to_u8_slice(u32::from_le_bytes(TAG) as usize, out, TAG_IDX);
            write_usize_to_u8_slice(state.yield_pc, out, YIELDPC_IDX);
            write_usize_to_u8_slice(state.psr, out, PSR_IDX);
            write_usize_to_u8_slice(state.psp, out, PSP_IDX);
            for (i, v) in state.regs.iter().enumerate() {
                write_usize_to_u8_slice(*v, out, REGS_IDX + i);
            }
            // Bytes written: 8 regs + 3 scalar fields + 3 metadata words.
            Ok((state.regs.len() + 3 + METADATA_LEN) * USIZE_SZ)
        } else {
            Err(ErrorCode::SIZE)
        }
    }
}
451}