capsules_extra/isolated_nonvolatile_storage_driver.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2025.

//! This provides userspace access to nonvolatile storage.
//!
//! This driver provides isolation between individual userland applications.
//! Each application only has access to its region of nonvolatile memory and
//! cannot read/write to nonvolatile memory of other applications.
//!
//! Each app is assigned a fixed amount of nonvolatile memory. This amount is
//! set at compile time.
//!
//! ## Storage Layout
//!
//! Example nonvolatile storage layout (note that `|` indicates bitwise
//! concatenation):
//!
//! ```text
//! ╒════════ ← Start of nonvolatile region
//! ├──────── ← Start of App 1's region header
//! │ Region version number (8 bits) | Region length (24 bits)
//! │ App 1's ShortID (u32)
//! │ XOR of previous two u32 fields (u32)
//! ├──────── ← Start of App 1's Region       ═╗
//! │                                          ║
//! │
//! │                 region 1
//! │                  length
//! │
//! │                                          ║
//! │                                         ═╝
//! ├──────── ← Start of App 2's region header
//! │ Region version number (8 bits) | Region length (24 bits)
//! │ App 2's ShortID (u32)
//! │ XOR of previous two u32 fields (u32)
//! ├──────── ← Start of App 2's Region       ═╗
//! │                                          ║
//! │
//! │
//! │                 region 2
//! │                  length
//! │
//! │
//! │                                          ║
//! ...                                       ═╝
//! ╘════════ ← End of userspace region
//! ```
//!
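//! As a concrete example of how the header fields are packed (the values here
//! are hypothetical), a V1 header for an app whose `ShortID` is `0x1234_5678`
//! and whose region is 2048 bytes long would be computed as:
//!
//! ```rust,ignore
//! let version_and_length: u32 = (0x01 << 24) | 2048; // 0x0100_0800
//! let shortid: u32 = 0x1234_5678;
//! let xor: u32 = version_and_length ^ shortid;       // 0x1334_5e78
//! ```
//!
//! These three `u32` values are stored little-endian, so the header occupies
//! 12 bytes immediately in front of the app's usable region.
//!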
//! ## Storage Initialization
//!
//! This capsule caches the location of an application's storage region in
//! grant. This cached location is set on the first usage of this capsule.
//!
//! Here is a general high-level overview of what happens when an app makes its
//! first syscall:
//! 1. App engages with the capsule by making any syscall.
//! 2. Capsule searches through storage to see if that app has an existing
//!    region.
//! 3. a. If the capsule finds a matching region:
//!       - Cache the app's region information in its grant.
//!    b. If the capsule DOESN'T find a matching region:
//!       - Allocate a new region for that app.
//!       - Erase the region's usable area.
//! 4. Handle the syscall that the app originally made.
//! 5. When the syscall finishes, notify the app via upcall.
//!
//! ## Example Software Stack
//!
//! Here is a diagram of the expected stack with this capsule. Boxes are
//! components, and between the boxes are the traits that serve as the
//! interfaces between them. This capsule only provides a userspace interface.
//!
//! ```text
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! |                               userspace                                |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//!                              kernel::Driver
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! | isolated_nonvolatile_storage_driver::IsolatedNonvolatileStorage (this) |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//!               hil::nonvolatile_storage::NonvolatileStorage
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! |                  Physical nonvolatile storage driver                   |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//! ```
//!

use core::cmp;

use kernel::errorcode::into_statuscode;
use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount};
use kernel::hil;
use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
use kernel::syscall::{CommandReturn, SyscallDriver};
use kernel::utilities::cells::{OptionalCell, TakeCell};
use kernel::utilities::copy_slice::CopyOrErr;
use kernel::{ErrorCode, ProcessId};

use capsules_core::driver;

pub const DRIVER_NUM: usize = driver::NUM::IsolatedNvmStorage as usize;

/// Recommended size for the buffer provided to this capsule.
///
/// This is enough space for a buffer to be used for reading/writing userspace
/// data.
pub const BUF_LEN: usize = 512;

/// IDs for subscribed upcalls.
mod upcall {
    /// Get storage size done callback.
    pub const GET_SIZE_DONE: usize = 0;
    /// Read done callback.
    pub const READ_DONE: usize = 1;
    /// Write done callback.
    pub const WRITE_DONE: usize = 2;
    /// Number of upcalls.
    pub const COUNT: u8 = 3;
}

/// IDs for read-only allow buffers.
mod ro_allow {
    /// Set up a buffer to write bytes to the nonvolatile storage.
    pub const WRITE: usize = 0;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}

/// IDs for read-write allow buffers.
mod rw_allow {
    /// Set up a buffer to read from the nonvolatile storage into.
    pub const READ: usize = 0;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}

#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(u8)]
enum HeaderVersion {
    V1 = 0x01,
}

// Current header version to allocate new regions with.
const CURRENT_HEADER_VERSION: HeaderVersion = HeaderVersion::V1;

/// Describes a region of nonvolatile memory that is assigned to a certain app.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AppRegion {
    /// The version is based on the capsule version and layout format in use
    /// when the region was created. This is set to a fixed value for all new
    /// regions. An existing region may have been created with a newer or
    /// earlier version of this capsule and therefore might have a different
    /// version than what we currently initialize new regions with.
    version: HeaderVersion,
    /// Absolute address to describe where an app's nonvolatile region starts.
    /// Note that this is the address FOLLOWING the region's header.
    absolute_address: usize,
    /// How many bytes allocated to a certain app. Note that this describes the
    /// length of the usable storage region and does not include the region's
    /// header.
    length: usize,
}

// Metadata to be written before every app's region to describe the owner and
// size of the region.
#[derive(Clone, Copy, Debug)]
struct AppRegionHeader {
    /// An 8 bit version number concatenated with a 24 bit length value.
    version_and_length: u32,
    /// Unique per-app identifier. This comes from the Fixed variant of the
    /// ShortID type.
    shortid: u32,
    /// xor between `version_and_length` and `shortid` fields. This serves as a
    /// checksum.
    xor: u32,
}

/// The size of the `AppRegionHeader` stored in the nonvolatile storage.
const REGION_HEADER_LEN: usize = 3 * core::mem::size_of::<u32>();

impl AppRegionHeader {
    fn new(version: HeaderVersion, shortid: u32, length: usize) -> Option<Self> {
        // Check that the length will fit in 24 bits (3 bytes).
        if length >= (1 << 24) {
            return None;
        }

        let version_and_length = ((version as u8 as u32) << 24) | length as u32;

        let xor = version_and_length ^ shortid;

        Some(AppRegionHeader {
            version_and_length,
            shortid,
            xor,
        })
    }

    fn from_bytes(bytes: [u8; REGION_HEADER_LEN]) -> Option<Self> {
        // The first 4 bytes hold the concatenated 8-bit version and 24-bit
        // length, stored as a little-endian u32 (the version occupies the most
        // significant byte).
        let version_and_length_slice = bytes[0..4].try_into().ok()?;
        let version_and_length = u32::from_le_bytes(version_and_length_slice);

        let shortid_slice = bytes[4..8].try_into().ok()?;
        let shortid = u32::from_le_bytes(shortid_slice);

        let xor_slice = bytes[8..12].try_into().ok()?;
        let xor = u32::from_le_bytes(xor_slice);

        Some(AppRegionHeader {
            version_and_length,
            shortid,
            xor,
        })
    }

    fn to_bytes(self) -> [u8; REGION_HEADER_LEN] {
        let mut header_slice = [0; REGION_HEADER_LEN];

        // copy version and length
        let version_and_length_slice = u32::to_le_bytes(self.version_and_length);
        let version_and_length_start_idx = 0;
        let version_and_length_end_idx = version_and_length_slice.len();
        header_slice[version_and_length_start_idx..version_and_length_end_idx]
            .copy_from_slice(&version_and_length_slice);

        // copy shortid
        let shortid_slice = u32::to_le_bytes(self.shortid);
        let shortid_start_idx = version_and_length_end_idx;
        let shortid_end_idx = shortid_start_idx + shortid_slice.len();
        header_slice[shortid_start_idx..shortid_end_idx].copy_from_slice(&shortid_slice);

        // copy xor checksum
        let xor_slice = u32::to_le_bytes(self.xor);
        let xor_start_idx = shortid_end_idx;
        let xor_end_idx = xor_start_idx + xor_slice.len();
        header_slice[xor_start_idx..xor_end_idx].copy_from_slice(&xor_slice);

        header_slice
    }

    fn is_valid(&self) -> bool {
        self.version().is_some() && self.xor == (self.version_and_length ^ self.shortid)
    }

    fn version(&self) -> Option<HeaderVersion> {
        // Need to do this since we can't pattern match against a method call.
        const HEADER_V1: u8 = HeaderVersion::V1 as u8;

        // Extract the 8 most significant bits from the concatenated version and
        // length.
        match (self.version_and_length >> 24) as u8 {
            HEADER_V1 => Some(HeaderVersion::V1),
            _ => None,
        }
    }

    fn length(&self) -> u32 {
        // Extract the 24 least significant bits from the concatenated version
        // and length.
        self.version_and_length & 0x00ffffff
    }
}

/// Operation referencing a particular region.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum ManagerTask {
    /// Read the contents of the header in the region. The `usize` is the
    /// address of the start of the header.
    DiscoverRegions(usize),
    /// Write a valid header to the storage.
    WriteHeader(ProcessId, AppRegion),
    /// Erase the contents of a region. This supports using multiple nonvolatile
    /// storage operations to complete the entire erase.
    EraseRegion {
        processid: ProcessId,
        next_erase_start: usize,
        remaining_bytes: usize,
    },
}

/// What is currently using the underlying nonvolatile storage driver.
#[derive(Clone, Copy, Debug)]
pub enum User {
    /// The operation is from a userspace process.
    App { processid: ProcessId },
    /// The operation is from this capsule.
    RegionManager(ManagerTask),
}

/// The operation the process requested.
#[derive(Clone, Copy, Debug)]
pub enum NvmCommand {
    GetSize,
    Read { offset: usize },
    Write { offset: usize },
}

impl NvmCommand {
    fn offset(&self) -> usize {
        match self {
            NvmCommand::Read { offset } => *offset,
            NvmCommand::Write { offset } => *offset,
            NvmCommand::GetSize => 0,
        }
    }

    fn upcall(&self) -> usize {
        match self {
            Self::GetSize => upcall::GET_SIZE_DONE,
            Self::Write { offset: _ } => upcall::WRITE_DONE,
            Self::Read { offset: _ } => upcall::READ_DONE,
        }
    }
}

/// State stored in the grant region on behalf of each app.
#[derive(Default)]
pub struct App {
    /// Describe the location and size of an app's region (if it has been
    /// initialized).
    region: Option<AppRegion>,
    /// Operation that will be handled once the init sequence is complete.
    pending_operation: Option<NvmCommand>,
}

/// Helper function to create a full, single usize value from two 32-bit values
/// stored in usizes.
///
/// In C this would look like:
///
/// ```c
/// size_t v = (hi << 32) | (uint32_t) lo;
/// ```
///
/// This is useful when passing a machine-sized value (i.e. a `size_t`) via the
/// system call interface in two 32-bit usize values. On a 32-bit machine this
/// essentially has no effect; the full value is stored in the `lo` usize. On a
/// 64-bit machine, this creates a usize by concatenating the hi and lo 32-bit
/// values.
///
/// TODO
/// ----
///
/// This can be more succinctly implemented using
/// [`unbounded_shl()`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unbounded_shl).
/// However, that method is currently a nightly-only feature.
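///
/// For example (illustrative values, not a doctest):
///
/// ```rust,ignore
/// let offset = usize32s_to_usize(0x0000_beef, 0x0000_dead);
/// // On a 64-bit platform: offset == 0x0000_dead_0000_beef
/// // On a 32-bit platform: offset == 0x0000_beef (the `hi` half is ignored)
/// ```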
#[inline]
pub const fn usize32s_to_usize(lo: usize, hi: usize) -> usize {
    if usize::BITS <= 32 {
        // Just return the lo value since it has the bits we need.
        lo
    } else {
        // Create a 64-bit value.
        (lo & 0xFFFFFFFF) | (hi << 32)
    }
}

pub struct IsolatedNonvolatileStorage<'a, const APP_REGION_SIZE: usize> {
    /// The underlying physical storage device.
    driver: &'a dyn hil::nonvolatile_storage::NonvolatileStorage<'a>,
    /// Per-app state.
    apps: Grant<
        App,
        UpcallCount<{ upcall::COUNT }>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,

    /// Internal buffer for copying appslices into.
    buffer: TakeCell<'static, [u8]>,
    /// What issued the currently executing call. This can be an app or the
    /// kernel.
    current_user: OptionalCell<User>,

    /// The first byte that is accessible from userspace.
    userspace_start_address: usize,
    /// How many bytes allocated to userspace.
    userspace_length: usize,

    /// Absolute address of the header of the next region of userspace that's
    /// not allocated to an app yet. Each time an app uses this capsule, a new
    /// region of storage will be handed out and this address will point to the
    /// header of a new unallocated region.
    next_unallocated_region_header_address: OptionalCell<usize>,
}

impl<'a, const APP_REGION_SIZE: usize> IsolatedNonvolatileStorage<'a, APP_REGION_SIZE> {
    pub fn new(
        driver: &'a dyn hil::nonvolatile_storage::NonvolatileStorage<'a>,
        grant: Grant<
            App,
            UpcallCount<{ upcall::COUNT }>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        userspace_start_address: usize,
        userspace_length: usize,
        buffer: &'static mut [u8],
    ) -> Self {
        Self {
            driver,
            apps: grant,
            buffer: TakeCell::new(buffer),
            current_user: OptionalCell::empty(),
            userspace_start_address,
            userspace_length,
            next_unallocated_region_header_address: OptionalCell::empty(),
        }
    }

    // Start reading app region headers.
    fn start_region_traversal(&self) -> Result<(), ErrorCode> {
        if self.current_user.is_some() {
            // Can't traverse the regions right now because the underlying
            // driver is already in use.
            return Err(ErrorCode::BUSY);
        }

        let res = self.read_region_header(self.userspace_start_address);
        match res {
            Ok(()) => {
                // Mark that we started the discover operation.
                self.current_user
                    .set(User::RegionManager(ManagerTask::DiscoverRegions(
                        self.userspace_start_address,
                    )));
                Ok(())
            }
            Err(e) => {
                // We did not successfully start the discover, return the error.
                Err(e)
            }
        }
    }

    fn allocate_app_region(&self, processid: ProcessId) -> Result<(), ErrorCode> {
        // Can't allocate a region if we haven't previously traversed existing
        // regions and found where they stop.
        let new_header_addr = self
            .next_unallocated_region_header_address
            .get()
            .ok_or(ErrorCode::FAIL)?;

        // Get an app's write_id (same as ShortID) for saving to the region
        // header. Note that if an app doesn't have valid permissions, it will
        // be unable to create storage regions.
        let write_id = processid
            .get_storage_permissions()
            .ok_or(ErrorCode::NOSUPPORT)?
            .get_write_id()
            .ok_or(ErrorCode::NOSUPPORT)?;

        let region = AppRegion {
            version: CURRENT_HEADER_VERSION,
            // Have this region start where all the existing regions end.
            // Note that the app's actual region starts after the region header.
            absolute_address: new_header_addr + REGION_HEADER_LEN,
            length: APP_REGION_SIZE,
        };

        // Fail if the new region is outside the userspace area.
        if region.absolute_address > self.userspace_start_address + self.userspace_length
            || region.absolute_address + region.length
                > self.userspace_start_address + self.userspace_length
        {
            return Err(ErrorCode::NOMEM);
        }

        let Some(header) = AppRegionHeader::new(region.version, write_id, region.length) else {
            return Err(ErrorCode::FAIL);
        };

        // Write this new region header to the end of the existing regions.
        let res = self.write_region_header(&region, &header, new_header_addr);
        match res {
            Ok(()) => {
                // Mark that we started the initialize region task.
                self.current_user
                    .set(User::RegionManager(ManagerTask::WriteHeader(
                        processid, region,
                    )));
                Ok(())
            }
            Err(e) => {
                // We did not successfully start the region initialization,
                // return the error.
                Err(e)
            }
        }
    }

    // Read the header of an app's storage region. The region_header_address
    // argument describes the start of the **header** and not the usable region
    // itself.
    fn read_region_header(&self, region_header_address: usize) -> Result<(), ErrorCode> {
        self.check_header_access(region_header_address, APP_REGION_SIZE)?;

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            self.driver
                .read(buffer, region_header_address, REGION_HEADER_LEN)
        })
    }

    // Write the header of an app's storage region. The region_header_address
    // argument describes the start of the **header** and not the usable region
    // itself.
    fn write_region_header(
        &self,
        region: &AppRegion,
        region_header: &AppRegionHeader,
        region_header_address: usize,
    ) -> Result<(), ErrorCode> {
        self.check_header_access(region.absolute_address, region.length)?;

        let header_slice = region_header.to_bytes();

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            let _ = buffer
                .get_mut(0..REGION_HEADER_LEN)
                .ok_or(ErrorCode::NOMEM)?
                .copy_from_slice_or_err(
                    header_slice
                        .get(0..REGION_HEADER_LEN)
                        .ok_or(ErrorCode::NOMEM)?,
                );

            self.driver
                .write(buffer, region_header_address, REGION_HEADER_LEN)
        })
    }

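    // Erase the usable content of a region by overwriting it with 0xFF (the
    // erased state), one internal-buffer-sized chunk at a time. Returns the
    // start address and number of bytes remaining for the next chunk. As an
    // illustration (sizes are hypothetical): with the recommended 512-byte
    // buffer (`BUF_LEN`) and a 2048-byte region, the full erase completes over
    // four write operations.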
    fn erase_region_content(
        &self,
        offset: usize,
        length: usize,
    ) -> Result<(usize, usize), ErrorCode> {
        self.check_header_access(offset, length)?;

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            let active_len = cmp::min(length, buffer.len());

            // Reset the buffer to all 0xFF (the erased state) so that no data
            // remaining from a previous operation gets written out.
            for c in buffer.iter_mut() {
                *c = 0xFF;
            }

            // how many more bytes to erase after this operation
            let remaining_len = if length > buffer.len() {
                length - buffer.len()
            } else {
                0
            };

            let next_erase_start = offset + active_len;

            self.driver
                .write(buffer, offset, active_len)
                .and(Ok((next_erase_start, remaining_len)))
        })
    }

    // Returns `Ok(Some(next_header_address))` if a new header read was
    // started, or `Ok(None)` if the traversal of existing regions is complete.
    fn header_read_done(&self, region_header_address: usize) -> Result<Option<usize>, ErrorCode> {
        // Cases when a header read completes:
        // 1. Read a valid header
        //    - The valid header belongs to a Tock app (might not be currently
        //      running).
        //    - Search for the owner of the region within the apps.
        //    - Find the owner of the region that has a matching shortid (from
        //      the header).
        //    - Then, start another read operation to read the header of the
        //      next storage region.
        // 2. Read an invalid header
        //    - We've reached the end of all previously allocated regions.
        //    - Allocate a new app region here.

        let header = self.buffer.map_or(Err(ErrorCode::NOMEM), |buffer| {
            // Need to copy over bytes since we need to convert a &[u8] into a
            // [u8; REGION_HEADER_LEN]. The &[u8] refers to a slice of size
            // BUF_LEN (which could be different than REGION_HEADER_LEN). Using
            // buffer.try_into() will fail at runtime since the underlying
            // buffer is not the same length as what we're trying to convert
            // into.
            let mut header_buffer = [0; REGION_HEADER_LEN];
            header_buffer
                .copy_from_slice_or_err(&buffer[..REGION_HEADER_LEN])
                .or(Err(ErrorCode::FAIL))?;

            // reconstruct header from bytes we just read
            AppRegionHeader::from_bytes(header_buffer).ok_or(ErrorCode::FAIL)
        })?;

        if header.is_valid() {
            // Find the app with the corresponding shortid.
            for app in self.apps.iter() {
                let processid = app.processid();
                // Skip an app if it doesn't have the proper storage
                // permissions.
                let write_id = match processid.get_storage_permissions() {
                    Some(perms) => match perms.get_write_id() {
                        Some(write_id) => write_id,
                        None => continue,
                    },
                    None => continue,
                };
                if write_id == header.shortid {
                    app.enter(|app, _kernel_data| {
                        if app.region.is_none() {
                            let version = header.version().ok_or(ErrorCode::FAIL)?;
                            let region = AppRegion {
                                version,
                                // The app's actual region starts after the
                                // region header.
                                absolute_address: region_header_address + REGION_HEADER_LEN,
                                length: header.length() as usize,
                            };
                            app.region.replace(region);
                        }
                        Ok::<(), ErrorCode>(())
                    })?;
                    break;
                }
            }

            let next_header_address =
                region_header_address + REGION_HEADER_LEN + header.length() as usize;
            // Kick off another read for the next region.
            self.read_region_header(next_header_address)
                .and(Ok(Some(next_header_address)))
        } else {
            // This is the end of the region traversal. If a header is invalid,
            // we've reached the end of all previously allocated regions.

            // Save this region header address so that we can allocate new
            // regions here later.
            self.next_unallocated_region_header_address
                .set(region_header_address);

            Ok(None)
        }
    }

    fn check_userspace_perms(
        &self,
        processid: ProcessId,
        command: NvmCommand,
    ) -> Result<(), ErrorCode> {
        let perms = processid
            .get_storage_permissions()
            .ok_or(ErrorCode::NOSUPPORT)?;
        let write_id = perms.get_write_id().ok_or(ErrorCode::NOSUPPORT)?;
        match command {
            NvmCommand::Read { offset: _ } => perms
                .check_read_permission(write_id)
                .then_some(())
                .ok_or(ErrorCode::NOSUPPORT),
            NvmCommand::Write { offset: _ } => perms
                .check_modify_permission(write_id)
                .then_some(())
                .ok_or(ErrorCode::NOSUPPORT),
            NvmCommand::GetSize => {
                // If we have a `write_id` then we can return the size.
                Ok(())
            }
        }
    }

    fn check_userspace_access(
        &self,
        offset: usize,
        length: usize,
        region: &AppRegion,
    ) -> Result<(), ErrorCode> {
        // Check that access is within this app's isolated nonvolatile region.
        // This is to prevent an app from reading/writing to another app's
        // nonvolatile storage.

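        // For illustration (sizes are hypothetical): with a 2048-byte region,
        // an access at offset 2040 with length 16 is rejected because
        // 2040 + 16 > 2048, while offset 2040 with length 8 is allowed.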
        if offset >= region.length || length > region.length || offset + length > region.length {
            return Err(ErrorCode::INVAL);
        }

        Ok(())
    }

    fn check_header_access(&self, offset: usize, length: usize) -> Result<(), ErrorCode> {
        // Check that we're within the entire userspace region.
        if offset < self.userspace_start_address
            || offset >= self.userspace_start_address + self.userspace_length
            || length > self.userspace_length
            || offset + length >= self.userspace_start_address + self.userspace_length
        {
            return Err(ErrorCode::INVAL);
        }

        Ok(())
    }

    // Check to see if we are already doing something. If not, go ahead and run
    // this command. If so, the command is queued and will be run when the
    // pending command completes.
    fn enqueue_userspace_command(
        &self,
        command: NvmCommand,
        processid: ProcessId,
    ) -> Result<(), ErrorCode> {
        self.check_userspace_perms(processid, command)?;

        self.apps
            .enter(processid, |app, _kernel_data| {
                if app.pending_operation.is_some() {
                    return Err(ErrorCode::BUSY);
                }
                app.pending_operation = Some(command);
                Ok(())
            })
            .unwrap_or_else(|err| Err(err.into()))?;

        self.check_queue();
        Ok(())
    }

    fn check_queue(&self) {
        if self.current_user.is_some() {
            // If the driver is busy we can't start a new operation and do not
            // need to check the queue.
            return;
        }

        // If this is none, we haven't traversed the existing regions yet.
        if self.next_unallocated_region_header_address.is_none() {
            match self.start_region_traversal() {
                Ok(()) => {
                    // We started an operation so we can return and let that
                    // operation finish.
                    return;
                }
                Err(_e) => {
                    // We did not start the traversal which is a problem. This
                    // shouldn't happen, but if it does then we could overwrite
                    // existing regions.
                    return;
                }
            }
        }

        // Iterate apps and run an operation if one is pending.
        for app in self.apps.iter() {
            let processid = app.processid();
            let started = app.enter(|app, kernel_data| {
                match app.pending_operation {
                    Some(nvm_command) => {
                        if app.region.is_none() {
                            // This app needs its region allocated.
                            self.allocate_app_region(processid).is_ok()
                        } else {
                            let res = self.handle_syscall(nvm_command, processid, app, kernel_data);
                            match res {
                                Ok(started_operation) => started_operation,
                                Err(e) => {
                                    app.pending_operation = None;
                                    kernel_data
                                        .schedule_upcall(
                                            nvm_command.upcall(),
                                            (into_statuscode(Err(e)), 0, 0),
                                        )
                                        .ok();

                                    false
                                }
                            }
                        }
                    }
                    None => false,
                }
            });
            if started {
                break;
            }
        }
    }

    fn handle_syscall(
        &self,
        command: NvmCommand,
        processid: ProcessId,
        app: &mut App,
        kernel_data: &kernel::grant::GrantKernelData,
    ) -> Result<bool, ErrorCode> {
        match command {
            NvmCommand::GetSize => {
                match app.region {
                    Some(region) => {
                        // clear pending syscall
                        app.pending_operation = None;
                        // signal app with the result
                        kernel_data
                            .schedule_upcall(
                                upcall::GET_SIZE_DONE,
                                (into_statuscode(Ok(())), region.length, 0),
                            )
                            .ok();
                        Ok(false)
                    }
                    None => Err(ErrorCode::NOMEM),
                }
            }

            NvmCommand::Read { offset: _ } | NvmCommand::Write { offset: _ } => {
                // Get the length of the correct allowed buffer.
                let allow_buf_len = match command {
                    NvmCommand::Read { offset: _ } => kernel_data
                        .get_readwrite_processbuffer(rw_allow::READ)
                        .map_or(0, |read| read.len()),
                    NvmCommand::Write { offset: _ } => kernel_data
                        .get_readonly_processbuffer(ro_allow::WRITE)
                        .map_or(0, |read| read.len()),
                    NvmCommand::GetSize => 0,
                };

                // Check that the matching allowed buffer exists.
                if allow_buf_len == 0 {
                    return Err(ErrorCode::RESERVE);
                }

                // Fail if the app doesn't have a region assigned to it.
                let Some(app_region) = &app.region else {
                    return Err(ErrorCode::NOMEM);
                };

                let command_offset = command.offset();

                self.check_userspace_access(command_offset, allow_buf_len, app_region)?;

                // Need to copy bytes if this is a write!
                if let NvmCommand::Write { offset: _ } = command {
                    let _ = kernel_data
                        .get_readonly_processbuffer(ro_allow::WRITE)
                        .and_then(|write| {
                            write.enter(|app_buffer| {
                                self.buffer.map(|kernel_buffer| {
                                    // Check that the internal buffer and
                                    // the buffer that was allowed are long
                                    // enough.
                                    let write_len = cmp::min(allow_buf_len, kernel_buffer.len());

                                    let d = &app_buffer[0..write_len];
                                    for (i, c) in kernel_buffer[0..write_len].iter_mut().enumerate()
                                    {
                                        *c = d[i].get();
                                    }
                                });
                            })
                        });
                }

                // Calculate where we want to actually read from in the
                // physical storage. Note that the offset for this
                // command is with respect to the app's region address
                // space. This means that userspace accesses start at 0
                // which is the start of the app's region.
                let physical_address = app_region.absolute_address + command_offset;

                let res = self
                    .buffer
                    .take()
                    .map_or(Err(ErrorCode::RESERVE), |buffer| {
                        // Check that the internal buffer and the buffer that was
                        // allowed are long enough.
                        let active_len_buf = cmp::min(allow_buf_len, buffer.len());

                        match command {
                            NvmCommand::Read { offset: _ } => self
                                .driver
                                .read(buffer, physical_address, active_len_buf)
                                .or(Err(ErrorCode::FAIL)),
                            NvmCommand::Write { offset: _ } => self
                                .driver
                                .write(buffer, physical_address, active_len_buf)
                                .or(Err(ErrorCode::FAIL)),
                            NvmCommand::GetSize => Err(ErrorCode::FAIL),
                        }
                    });
                match res {
                    Ok(()) => {
                        self.current_user.set(User::App { processid });
                        Ok(true)
                    }
                    Err(e) => Err(e),
                }
            }
        }
    }
}

/// This is the callback client for the underlying physical storage driver.
impl<const APP_REGION_SIZE: usize> hil::nonvolatile_storage::NonvolatileStorageClient
    for IsolatedNonvolatileStorage<'_, APP_REGION_SIZE>
{
    fn read_done(&self, buffer: &'static mut [u8], length: usize) {
        // Switch on which user of this capsule generated this callback.
        self.current_user.take().map(|user| {
            match user {
                User::RegionManager(state) => {
                    self.buffer.replace(buffer);
                    if let ManagerTask::DiscoverRegions(address) = state {
                        let res = self.header_read_done(address);
                        match res {
                            Ok(addr) => match addr {
                                Some(next_header_address) => {
                                    self.current_user.set(User::RegionManager(
                                        ManagerTask::DiscoverRegions(next_header_address),
                                    ));
                                }
                                None => {
                                    // We finished the scan of existing
                                    // regions. Now we can check the queue
                                    // to see if there is any work to be
                                    // done.
                                    self.check_queue();
                                }
                            },
                            Err(_e) => {
                                // Not clear what to do here.
                                self.check_queue();
                            }
                        }
                    }
                }
                User::App { processid } => {
                    let _ = self.apps.enter(processid, move |app, kernel_data| {
                        // Need to copy in the contents of the buffer
                        let read_len = kernel_data
                            .get_readwrite_processbuffer(rw_allow::READ)
                            .and_then(|read| {
                                read.mut_enter(|app_buffer| {
                                    let read_len = cmp::min(app_buffer.len(), length);

                                    let d = &app_buffer[0..read_len];
                                    for (i, c) in buffer[0..read_len].iter().enumerate() {
                                        d[i].set(*c);
                                    }
                                    read_len
                                })
                            })
                            .unwrap_or(0);

                        // Replace the buffer we used to do this read.
                        self.buffer.replace(buffer);

                        // clear pending syscall
                        app.pending_operation = None;
                        // And then signal the app.
                        kernel_data
                            .schedule_upcall(
                                upcall::READ_DONE,
                                (into_statuscode(Ok(())), read_len, 0),
                            )
                            .ok();
                    });

                    self.check_queue();
                }
            }
        });
    }

    fn write_done(&self, buffer: &'static mut [u8], length: usize) {
        // Replace the buffer we used to do this write.
        self.buffer.replace(buffer);

        // Switch on which user of this capsule generated this callback.
        self.current_user.take().map(|user| {
            match user {
                User::RegionManager(state) => {
                    match state {
                        ManagerTask::WriteHeader(processid, region) => {
                            // Now that we have written the header for the app
                            // we can store its region in its grant.
                            let _ = self.apps.enter(processid, |app, _kernel_data| {
                                // set region data in app's grant
                                app.region.replace(region);
                            });

                            // Update our metadata about where the next
                            // unallocated region is.
                            let next_header_addr = region.absolute_address + region.length;
                            self.next_unallocated_region_header_address
                                .set(next_header_addr);

                            // Erase the userspace accessible content of the region
                            // before handing it off to an app.
                            let res =
                                self.erase_region_content(region.absolute_address, region.length);
                            match res {
                                Ok((next_erase_start, remaining_bytes)) => {
                                    self.current_user.set(User::RegionManager(
                                        // Need to pass on where the next erase
                                        // should start and how long it should be.
                                        ManagerTask::EraseRegion {
                                            processid,
                                            next_erase_start,
                                            remaining_bytes,
                                        },
                                    ));
                                }
                                Err(_e) => {
                                    // Not clear what to do here.
                                    self.current_user.clear();
                                    self.check_queue();
                                }
                            }
                        }
                        ManagerTask::EraseRegion {
                            processid,
                            next_erase_start,
                            remaining_bytes,
                        } => {
                            if remaining_bytes > 0 {
                                // We still have more to erase, so kick off
                                // another one where we left off.
                                let res =
                                    self.erase_region_content(next_erase_start, remaining_bytes);
                                match res {
                                    Ok((next_erase_start, remaining_bytes)) => {
                                        self.current_user.set(User::RegionManager(
                                            ManagerTask::EraseRegion {
                                                processid,
                                                next_erase_start,
                                                remaining_bytes,
                                            },
                                        ));
                                    }
                                    Err(_e) => {
                                        // Not clear what to do here.
                                        self.current_user.clear();
                                        self.check_queue();
                                    }
                                }
                            } else {
                                // Done erasing entire region. Can go on with
                                // normal tasks.
                                self.current_user.clear();
                                self.check_queue();
                            }
                        }
                        _ => {}
                    }
                }
                User::App { processid } => {
                    let _ = self.apps.enter(processid, move |app, kernel_data| {
                        // clear pending syscall
                        app.pending_operation = None;
                        // Notify app that its write has completed.
                        kernel_data
                            .schedule_upcall(
                                upcall::WRITE_DONE,
                                (into_statuscode(Ok(())), length, 0),
                            )
                            .ok();
                    });
                    self.current_user.clear();
                    self.check_queue();
                }
            }
        });
    }
}

/// Provide an interface for userland.
impl<const APP_REGION_SIZE: usize> SyscallDriver
    for IsolatedNonvolatileStorage<'_, APP_REGION_SIZE>
{
    /// Command interface.
    ///
    /// Commands are selected by the lowest 8 bits of the first argument.
    ///
    /// ### `command_num`
    ///
    /// - `0`: Return Ok(()) if this driver is included on the platform.
    /// - `1`: Return the number of bytes available to each app.
    /// - `2`: Start a read from the nonvolatile storage.
    /// - `3`: Start a write to the nonvolatile storage.
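    ///
    /// For commands `2` and `3`, the offset into the app's region is passed as
    /// two 32-bit halves (`offset_lo`, `offset_hi`) and recombined with
    /// `usize32s_to_usize()`. For example (hypothetical values, and the exact
    /// userspace API depends on the libtock library in use), a read at offset
    /// `0x1_0000_0800` on a 64-bit platform would be issued from userspace
    /// roughly as:
    ///
    /// ```rust,ignore
    /// // Raw command syscall: command(driver_num, command_num, arg2, arg3)
    /// command(DRIVER_NUM, 2, 0x0000_0800, 0x1);
    /// ```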
    fn command(
        &self,
        command_num: usize,
        offset_lo: usize,
        offset_hi: usize,
        processid: ProcessId,
    ) -> CommandReturn {
        match command_num {
            0 => CommandReturn::success(),

            // For the get_bytes, read, and write syscalls we need to first
            // initialize the app's isolated nonvolatile storage. This involves
            // searching the storage area for an existing region that belongs to
            // this app. If we don't find an existing region we allocate a new
            // one. Only once the initialization is complete can we service the
            // original syscall. So, we store the syscall data in the app's
            // grant and handle it when initialization finishes.
            1 | 2 | 3 => {
                // We want to handle both 64-bit and 32-bit platforms, but on
                // 32-bit platforms shifting `offset_hi` doesn't make sense.
                let offset: usize = usize32s_to_usize(offset_lo, offset_hi);
                let nvm_command = match command_num {
                    1 => NvmCommand::GetSize,
                    2 => NvmCommand::Read { offset },
                    3 => NvmCommand::Write { offset },
                    _ => return CommandReturn::failure(ErrorCode::NOSUPPORT),
                };

                // Enqueue the operation for the app.
                let res = self.enqueue_userspace_command(nvm_command, processid);
                match res {
                    Ok(()) => CommandReturn::success(),
                    Err(e) => CommandReturn::failure(e),
                }
            }
            _ => CommandReturn::failure(ErrorCode::NOSUPPORT),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
1136}