capsules_extra/isolated_nonvolatile_storage_driver.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2025.

//! This provides userspace access to nonvolatile storage.
//!
//! This driver provides isolation between individual userland applications.
//! Each application only has access to its region of nonvolatile memory and
//! cannot read/write to nonvolatile memory of other applications.
//!
//! Each app is assigned a fixed amount of nonvolatile memory. This amount is
//! set at compile time.
//!
//! ## Storage Layout
//!
//! Example nonvolatile storage layout (note that `|` indicates bitwise
//! concatenation):
//!
//! ```text
//! ╒════════ ← Start of nonvolatile region
//! ├──────── ← Start of App 1's region header
//! │ Region version number (8 bits) | Region length (24 bits)
//! │ App 1's ShortID (u32)
//! │ XOR of previous two u32 fields (u32)
//! ├──────── ← Start of App 1's Region          ═╗
//! │                                             ║
//! │
//! │                    region 1
//! │                    length
//! │
//! │                                             ║
//! │                                            ═╝
//! ├──────── ← Start of App 2's region header
//! │ Region version number (8 bits) | Region length (24 bits)
//! │ App 2's ShortID (u32)
//! │ XOR of previous two u32 fields (u32)
//! ├──────── ← Start of App 2's Region          ═╗
//! │                                             ║
//! │
//! │
//! │                    region 2
//! │                    length
//! │
//! │
//! │                                             ║
//! ...                                          ═╝
//! ╘════════ ← End of userspace region
//! ```
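//!
//! As a concrete illustration of the header fields (hypothetical values, not
//! taken from a real app): a V1 header for an app whose `ShortID` is
//! `0x42554754` and whose usable region is 2048 bytes long would contain:
//!
//! ```text
//! version_and_length = (0x01 << 24) | 2048 = 0x01000800
//! shortid            = 0x42554754
//! xor                = 0x01000800 ^ 0x42554754 = 0x43554F54
//! ```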
//!
//! ## Storage Initialization
//!
//! This capsule caches the location of an application's storage region in
//! grant. This cached location is set on the first usage of this capsule.
//!
//! Here is a general high-level overview of what happens when an app makes
//! its first syscall:
//! 1. App engages with the capsule by making any syscall.
//! 2. Capsule searches through storage to see if that app has an existing
//!    region.
//! 3. a. If the capsule finds a matching region:
//!       - Cache the app's region information in its grant.
//!    b. If the capsule DOESN'T find a matching region:
//!       - Allocate a new region for that app.
//!       - Erase the region's usable area.
//! 4. Handle the syscall that the app originally made.
//! 5. When the syscall finishes, notify the app via upcall.
//!
//! ## Example Software Stack
//!
//! Here is a diagram of the expected stack with this capsule. Boxes are
//! components, and between the boxes are the traits that serve as the
//! interfaces between components. This capsule only provides a userspace
//! interface.
//!
//! ```text
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! |                               userspace                                |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//!                       kernel::syscall::SyscallDriver
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! | isolated_nonvolatile_storage_driver::IsolatedNonvolatileStorage (this) |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//!                hil::nonvolatile_storage::NonvolatileStorage
//! +------------------------------------------------------------------------+
//! |                                                                        |
//! |                   Physical nonvolatile storage driver                  |
//! |                                                                        |
//! +------------------------------------------------------------------------+
//! ```
//!
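//! ## Usage
//!
//! A rough sketch of how a board might instantiate this capsule. The names
//! `flash_user`, `grant_cap`, `USERSPACE_START`, `USERSPACE_LENGTH`, and
//! `APP_REGION_SIZE` are placeholders for board-specific values, not items
//! defined by this capsule:
//!
//! ```rust,ignore
//! // `flash_user` must implement hil::nonvolatile_storage::NonvolatileStorage.
//! let buffer = static_init!([u8; BUF_LEN], [0; BUF_LEN]);
//! let nv_storage = static_init!(
//!     IsolatedNonvolatileStorage<'static, APP_REGION_SIZE>,
//!     IsolatedNonvolatileStorage::new(
//!         flash_user,
//!         board_kernel.create_grant(DRIVER_NUM, &grant_cap),
//!         USERSPACE_START,  // First byte accessible from userspace.
//!         USERSPACE_LENGTH, // Number of bytes reserved for userspace.
//!         buffer,
//!     )
//! );
//! // Route read/write completion callbacks back to this capsule.
//! flash_user.set_client(nv_storage);
//! ```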

use core::cmp;

use kernel::errorcode::into_statuscode;
use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount};
use kernel::hil;
use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
use kernel::syscall::{CommandReturn, SyscallDriver};
use kernel::utilities::cells::{OptionalCell, TakeCell};
use kernel::utilities::copy_slice::CopyOrErr;
use kernel::{ErrorCode, ProcessId};

use capsules_core::driver;

pub const DRIVER_NUM: usize = driver::NUM::IsolatedNvmStorage as usize;

/// Recommended size for the buffer provided to this capsule.
///
/// This is enough space for a buffer to be used for reading/writing userspace
/// data.
pub const BUF_LEN: usize = 512;

/// IDs for subscribed upcalls.
mod upcall {
    /// Get storage size done callback.
    pub const GET_SIZE_DONE: usize = 0;
    /// Read done callback.
    pub const READ_DONE: usize = 1;
    /// Write done callback.
    pub const WRITE_DONE: usize = 2;
    /// Number of upcalls.
    pub const COUNT: u8 = 3;
}

/// IDs for read-only allow buffers.
mod ro_allow {
    /// Setup a buffer to write bytes to the nonvolatile storage.
    pub const WRITE: usize = 0;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}

/// IDs for read-write allow buffers.
mod rw_allow {
    /// Setup a buffer to read from the nonvolatile storage into.
    pub const READ: usize = 0;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}

#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(u8)]
enum HeaderVersion {
    V1 = 0x01,
}

// Current header version to allocate new regions with.
const CURRENT_HEADER_VERSION: HeaderVersion = HeaderVersion::V1;

/// Describes a region of nonvolatile memory that is assigned to a certain app.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct AppRegion {
    /// The version is based on the capsule version and layout format in use
    /// when the region was created. This is set to a fixed value for all new
    /// regions. An existing region may have been created with a newer or
    /// earlier version of this capsule and therefore might have a different
    /// version than what we currently initialize new regions with.
    version: HeaderVersion,
    /// Absolute address to describe where an app's nonvolatile region starts.
    /// Note that this is the address FOLLOWING the region's header.
    offset: usize,
    /// How many bytes are allocated to a certain app. Note that this describes
    /// the length of the usable storage region and does not include the
    /// region's header.
    length: usize,
}

/// Metadata to be written before every app's region to describe the owner and
/// size of the region.
#[derive(Clone, Copy, Debug)]
struct AppRegionHeader {
    /// An 8 bit version number concatenated with a 24 bit length value.
    version_and_length: u32,
    /// Unique per-app identifier. This comes from the Fixed variant of the
    /// ShortID type.
    shortid: u32,
    /// xor between `version_and_length` and `shortid` fields. This serves as a
    /// checksum.
    xor: u32,
}

/// The size of the `AppRegionHeader` stored in the nonvolatile storage.
const REGION_HEADER_LEN: usize = 3 * core::mem::size_of::<u32>();
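
// For reference, `AppRegionHeader::to_bytes()` below serializes a header into
// these 12 bytes, with each field stored little-endian:
//
//   bytes 0..4:  version_and_length (bytes 0..3 hold the 24-bit length,
//                least-significant byte first; byte 3 holds the version)
//   bytes 4..8:  shortid
//   bytes 8..12: xor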

impl AppRegionHeader {
    fn new(version: HeaderVersion, shortid: u32, length: usize) -> Option<Self> {
        // Check that the length will fit in 3 bytes (24 bits).
        if length >= (1 << 24) {
            return None;
        }

        let version_and_length = ((version as u8 as u32) << 24) | length as u32;

        let xor = version_and_length ^ shortid;

        Some(AppRegionHeader {
            version_and_length,
            shortid,
            xor,
        })
    }

    fn from_bytes(bytes: [u8; REGION_HEADER_LEN]) -> Option<Self> {
        // The first 4 bytes hold the 8 bit version and the 24 bit length.
        let version_and_length_slice = bytes[0..4].try_into().ok()?;
        let version_and_length = u32::from_le_bytes(version_and_length_slice);

        let shortid_slice = bytes[4..8].try_into().ok()?;
        let shortid = u32::from_le_bytes(shortid_slice);

        let xor_slice = bytes[8..12].try_into().ok()?;
        let xor = u32::from_le_bytes(xor_slice);

        Some(AppRegionHeader {
            version_and_length,
            shortid,
            xor,
        })
    }

    fn to_bytes(self) -> [u8; REGION_HEADER_LEN] {
        let mut header_slice = [0; REGION_HEADER_LEN];

        // copy version and length
        let version_and_length_slice = u32::to_le_bytes(self.version_and_length);
        let version_and_length_start_idx = 0;
        let version_and_length_end_idx = version_and_length_slice.len();
        header_slice[version_and_length_start_idx..version_and_length_end_idx]
            .copy_from_slice(&version_and_length_slice);

        // copy shortid
        let shortid_slice = u32::to_le_bytes(self.shortid);
        let shortid_start_idx = version_and_length_end_idx;
        let shortid_end_idx = shortid_start_idx + shortid_slice.len();
        header_slice[shortid_start_idx..shortid_end_idx].copy_from_slice(&shortid_slice);

        // copy xor
        let xor_slice = u32::to_le_bytes(self.xor);
        let xor_start_idx = shortid_end_idx;
        let xor_end_idx = xor_start_idx + xor_slice.len();
        header_slice[xor_start_idx..xor_end_idx].copy_from_slice(&xor_slice);

        header_slice
    }

    fn is_valid(&self) -> bool {
        self.version().is_some() && self.xor == (self.version_and_length ^ self.shortid)
    }

    fn version(&self) -> Option<HeaderVersion> {
        // Need to do this since we can't pattern match against a method call.
        const HEADER_V1: u8 = HeaderVersion::V1 as u8;

        // Extract the 8 most significant bits from the concatenated version
        // and length.
        match (self.version_and_length >> 24) as u8 {
            HEADER_V1 => Some(HeaderVersion::V1),
            _ => None,
        }
    }

    fn length(&self) -> u32 {
        // Extract the 24 least significant bits from the concatenated version
        // and length.
        self.version_and_length & 0x00ffffff
    }
}

/// Operation referencing a particular region.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum ManagerTask {
    /// Read the contents of the header in the region. The `usize` is the
    /// address of the start of the header.
    DiscoverRegions(usize),
    /// Write a valid header to the storage.
    WriteHeader(ProcessId, AppRegion),
    /// Erase the contents of a region. This supports using multiple
    /// nonvolatile storage operations to complete the entire erase.
    EraseRegion {
        processid: ProcessId,
        next_erase_start: usize,
        remaining_bytes: usize,
    },
}

/// What is currently using the underlying nonvolatile storage driver.
#[derive(Clone, Copy, Debug)]
pub enum User {
    /// The operation is from a userspace process.
    App { processid: ProcessId },
    /// The operation is from this capsule.
    RegionManager(ManagerTask),
}

/// The operation the process requested.
#[derive(Clone, Copy, Debug)]
pub enum NvmCommand {
    GetSize,
    Read { offset: usize },
    Write { offset: usize },
}

impl NvmCommand {
    fn offset(&self) -> usize {
        match self {
            NvmCommand::Read { offset } => *offset,
            NvmCommand::Write { offset } => *offset,
            NvmCommand::GetSize => 0,
        }
    }

    fn upcall(&self) -> usize {
        match self {
            Self::GetSize => upcall::GET_SIZE_DONE,
            Self::Write { offset: _ } => upcall::WRITE_DONE,
            Self::Read { offset: _ } => upcall::READ_DONE,
        }
    }
}

/// State stored in the grant region on behalf of each app.
#[derive(Default)]
pub struct App {
    /// Describe the location and size of an app's region (if it has been
    /// initialized).
    region: Option<AppRegion>,
    /// Operation that will be handled once the init sequence is complete.
    pending_operation: Option<NvmCommand>,
}

/// Helper function to create a full, single usize value from two 32-bit
/// values stored in usizes.
///
/// In C this would look like:
///
/// ```c
/// size_t v = (hi << 32) | (uint32_t) lo;
/// ```
///
/// This is useful when passing a machine-sized value (i.e. a `size_t`) via the
/// system call interface in two 32-bit usize values. On a 32-bit machine this
/// essentially has no effect; the full value is stored in the `lo` usize. On a
/// 64-bit machine, this creates a usize by concatenating the hi and lo 32-bit
/// values.
///
/// TODO
/// ----
///
/// This can be more succinctly implemented using
/// [`unbounded_shl()`](https://doc.rust-lang.org/stable/std/primitive.usize.html#method.unbounded_shl).
/// However, that method is currently a nightly-only feature.
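///
/// As a small illustration (values chosen arbitrarily), on a 64-bit platform:
///
/// ```rust,ignore
/// assert_eq!(usize32s_to_usize(0x1234_5678, 0x1), 0x1_1234_5678);
/// ```
///
/// On a 32-bit platform the same call simply returns `0x1234_5678` and the
/// `hi` word is ignored.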
#[inline]
pub const fn usize32s_to_usize(lo: usize, hi: usize) -> usize {
    if usize::BITS <= 32 {
        // Just return the lo value since it has the bits we need.
        lo
    } else {
        // Create a 64-bit value.
        (lo & 0xFFFFFFFF) | (hi << 32)
    }
}

pub struct IsolatedNonvolatileStorage<'a, const APP_REGION_SIZE: usize> {
    /// The underlying physical storage device.
    driver: &'a dyn hil::nonvolatile_storage::NonvolatileStorage<'a>,
    /// Per-app state.
    apps: Grant<
        App,
        UpcallCount<{ upcall::COUNT }>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,

    /// Internal buffer for copying appslices into.
    buffer: TakeCell<'static, [u8]>,
    /// What issued the currently executing call. This can be an app or the
    /// kernel.
    current_user: OptionalCell<User>,

    /// The first byte that is accessible from userspace.
    userspace_start_address: usize,
    /// How many bytes allocated to userspace.
    userspace_length: usize,

    /// Absolute address of the header of the next region of userspace that's
    /// not allocated to an app yet. Each time an app uses this capsule, a new
    /// region of storage will be handed out and this address will point to the
    /// header of a new unallocated region.
    next_unallocated_region_header_address: OptionalCell<usize>,
}

impl<'a, const APP_REGION_SIZE: usize> IsolatedNonvolatileStorage<'a, APP_REGION_SIZE> {
    pub fn new(
        driver: &'a dyn hil::nonvolatile_storage::NonvolatileStorage<'a>,
        grant: Grant<
            App,
            UpcallCount<{ upcall::COUNT }>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        userspace_start_address: usize,
        userspace_length: usize,
        buffer: &'static mut [u8],
    ) -> Self {
        Self {
            driver,
            apps: grant,
            buffer: TakeCell::new(buffer),
            current_user: OptionalCell::empty(),
            userspace_start_address,
            userspace_length,
            next_unallocated_region_header_address: OptionalCell::empty(),
        }
    }

    // Start reading app region headers.
    fn start_region_traversal(&self) -> Result<(), ErrorCode> {
        if self.current_user.is_some() {
            // Can't traverse the regions right now because the underlying
            // driver is already in use.
            return Err(ErrorCode::BUSY);
        }

        let res = self.read_region_header(self.userspace_start_address);
        match res {
            Ok(()) => {
                // Mark that we started the discover operation.
                self.current_user
                    .set(User::RegionManager(ManagerTask::DiscoverRegions(
                        self.userspace_start_address,
                    )));
                Ok(())
            }
            Err(e) => {
                // We did not successfully start the discover, return the error.
                Err(e)
            }
        }
    }

    fn allocate_app_region(&self, processid: ProcessId) -> Result<(), ErrorCode> {
        // Can't allocate a region if we haven't previously traversed existing
        // regions and found where they stop.
        let new_header_addr = self
            .next_unallocated_region_header_address
            .get()
            .ok_or(ErrorCode::FAIL)?;

        // Get an app's write_id (same as ShortID) for saving to region header.
        // Note that if an app doesn't have the valid permissions, it will be
        // unable to create storage regions.
        let write_id = processid
            .get_storage_permissions()
            .ok_or(ErrorCode::NOSUPPORT)?
            .get_write_id()
            .ok_or(ErrorCode::NOSUPPORT)?;

        let region = AppRegion {
            version: CURRENT_HEADER_VERSION,
            // Have this region start where all the existing regions end.
            // Note that the app's actual region starts after the region header.
            offset: new_header_addr + REGION_HEADER_LEN,
            length: APP_REGION_SIZE,
        };

        // fail if new region is outside userspace area
        if region.offset > self.userspace_start_address + self.userspace_length
            || region.offset + region.length > self.userspace_start_address + self.userspace_length
        {
            return Err(ErrorCode::NOMEM);
        }

        let Some(header) = AppRegionHeader::new(region.version, write_id, region.length) else {
            return Err(ErrorCode::FAIL);
        };

        // Write this new region header to the end of the existing regions.
        let res = self.write_region_header(&region, &header, new_header_addr);
        match res {
            Ok(()) => {
                // Mark that we started the initialize region task.
                self.current_user
                    .set(User::RegionManager(ManagerTask::WriteHeader(
                        processid, region,
                    )));
                Ok(())
            }
            Err(e) => {
                // We did not successfully start the region initialization,
                // return the error.
                Err(e)
            }
        }
    }

    // Read the header of an app's storage region. The region_header_address
    // argument describes the start of the **header** and not the usable region
    // itself.
    fn read_region_header(&self, region_header_address: usize) -> Result<(), ErrorCode> {
        self.check_header_access(region_header_address, APP_REGION_SIZE)?;

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            self.driver
                .read(buffer, region_header_address, REGION_HEADER_LEN)
        })
    }

    // Write the header of an app's storage region. The region_header_address
    // argument describes the start of the **header** and not the usable region
    // itself.
    fn write_region_header(
        &self,
        region: &AppRegion,
        region_header: &AppRegionHeader,
        region_header_address: usize,
    ) -> Result<(), ErrorCode> {
        self.check_header_access(region.offset, region.length)?;

        let header_slice = region_header.to_bytes();

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            let _ = buffer
                .get_mut(0..REGION_HEADER_LEN)
                .ok_or(ErrorCode::NOMEM)?
                .copy_from_slice_or_err(
                    header_slice
                        .get(0..REGION_HEADER_LEN)
                        .ok_or(ErrorCode::NOMEM)?,
                );

            self.driver
                .write(buffer, region_header_address, REGION_HEADER_LEN)
        })
    }

    fn erase_region_content(
        &self,
        offset: usize,
        length: usize,
    ) -> Result<(usize, usize), ErrorCode> {
        self.check_header_access(offset, length)?;

        self.buffer.take().map_or(Err(ErrorCode::NOMEM), |buffer| {
            let active_len = cmp::min(length, buffer.len());

            // Clear the erase buffer in case there was any data
            // remaining from a previous operation.
            for c in buffer.iter_mut() {
                *c = 0xFF;
            }

            // how many more bytes to erase after this operation
            let remaining_len = if length > buffer.len() {
                length - buffer.len()
            } else {
                0
            };

            let next_erase_start = offset + active_len;

            self.driver
                .write(buffer, offset, active_len)
                .and(Ok((next_erase_start, remaining_len)))
        })
    }

    // Returns `Ok()` with the address of the next header to be read if a new
    // header read was started.
    fn header_read_done(&self, region_header_address: usize) -> Result<Option<usize>, ErrorCode> {
        // Cases when a header read completes:
        // 1. Read a valid header
        //    - The valid header belongs to a Tock app (might not be currently
        //      running).
        //    - Search for the owner of the region within the apps.
        //    - Find the owner of the region that has a matching shortid (from
        //      the header).
        //    - Then, startup another read operation to read the header of the
        //      next storage region.
        // 2. Read an invalid header
        //    - We've reached the end of all previously allocated regions.
        //    - Allocate new app region here.

        let header = self.buffer.map_or(Err(ErrorCode::NOMEM), |buffer| {
            // Need to copy over bytes since we need to convert a &[u8] into a
            // [u8; REGION_HEADER_LEN]. The &[u8] refers to a slice of size
            // BUF_LEN (which could be different than REGION_HEADER_LEN). Using
            // buffer.try_into() will fail at runtime since the underlying
            // buffer is not the same length as what we're trying to convert
            // into.
            let mut header_buffer = [0; REGION_HEADER_LEN];
            header_buffer
                .copy_from_slice_or_err(&buffer[..REGION_HEADER_LEN])
                .or(Err(ErrorCode::FAIL))?;

            // reconstruct header from bytes we just read
            AppRegionHeader::from_bytes(header_buffer).ok_or(ErrorCode::FAIL)
        })?;

        if header.is_valid() {
            // Find the app with the corresponding shortid.
            for app in self.apps.iter() {
                let processid = app.processid();
                // Skip an app if it doesn't have the proper storage
                // permissions.
                let write_id = match processid.get_storage_permissions() {
                    Some(perms) => match perms.get_write_id() {
                        Some(write_id) => write_id,
                        None => continue,
                    },
                    None => continue,
                };
                if write_id == header.shortid {
                    app.enter(|app, _kernel_data| {
                        if app.region.is_none() {
                            let version = header.version().ok_or(ErrorCode::FAIL)?;
                            let region = AppRegion {
                                version,
                                // The app's actual region starts after the
                                // region header.
                                offset: region_header_address + REGION_HEADER_LEN,
                                length: header.length() as usize,
                            };
                            app.region.replace(region);
                        }
                        Ok::<(), ErrorCode>(())
                    })?;
                    break;
                }
            }

            let next_header_address =
                region_header_address + REGION_HEADER_LEN + header.length() as usize;
            // Kick off another read for the next region.
            self.read_region_header(next_header_address)
                .and(Ok(Some(next_header_address)))
        } else {
            // This is the end of the region traversal. If a header is invalid,
            // we've reached the end of all previously allocated regions.

            // Save this region header address so that we can allocate new
            // regions here later.
            self.next_unallocated_region_header_address
                .set(region_header_address);

            Ok(None)
        }
    }

    fn check_userspace_perms(
        &self,
        processid: ProcessId,
        command: NvmCommand,
    ) -> Result<(), ErrorCode> {
        let perms = processid
            .get_storage_permissions()
            .ok_or(ErrorCode::NOSUPPORT)?;
        let write_id = perms.get_write_id().ok_or(ErrorCode::NOSUPPORT)?;
        match command {
            NvmCommand::Read { offset: _ } => perms
                .check_read_permission(write_id)
                .then_some(())
                .ok_or(ErrorCode::NOSUPPORT),
            NvmCommand::Write { offset: _ } => perms
                .check_modify_permission(write_id)
                .then_some(())
                .ok_or(ErrorCode::NOSUPPORT),
            NvmCommand::GetSize => {
                // If we have a `write_id` then we can return the size.
                Ok(())
            }
        }
    }

    fn check_userspace_access(
        &self,
        offset: usize,
        length: usize,
        region: &AppRegion,
    ) -> Result<(), ErrorCode> {
        // Check that access is within this app's isolated nonvolatile region.
        // This is to prevent an app from reading/writing to another app's
        // nonvolatile storage.

        if offset >= region.length || length > region.length || offset + length > region.length {
            return Err(ErrorCode::INVAL);
        }

        Ok(())
    }

    fn check_header_access(&self, offset: usize, length: usize) -> Result<(), ErrorCode> {
        // Check that we're within the entire userspace region.
        if offset < self.userspace_start_address
            || offset >= self.userspace_start_address + self.userspace_length
            || length > self.userspace_length
            || offset + length >= self.userspace_start_address + self.userspace_length
        {
            return Err(ErrorCode::INVAL);
        }

        Ok(())
    }

    // Check to see if we are doing something. If not, go ahead and do this
    // command. If so, this is queued and will be run when the pending command
    // completes.
    fn enqueue_userspace_command(
        &self,
        command: NvmCommand,
        processid: ProcessId,
    ) -> Result<(), ErrorCode> {
        self.check_userspace_perms(processid, command)?;

        self.apps
            .enter(processid, |app, _kernel_data| {
                if app.pending_operation.is_some() {
                    return Err(ErrorCode::BUSY);
                }
                app.pending_operation = Some(command);
                Ok(())
            })
            .unwrap_or_else(|err| Err(err.into()))?;

        self.check_queue();
        Ok(())
    }

    fn check_queue(&self) {
        if self.current_user.is_some() {
            // If the driver is busy we can't start a new operation and do not
            // need to check the queue.
            return;
        }

        // If this is none, we haven't traversed the existing regions yet.
        if self.next_unallocated_region_header_address.is_none() {
            match self.start_region_traversal() {
                Ok(()) => {
                    // We started an operation so we can return and let that
                    // operation finish.
                    return;
                }
                Err(_e) => {
                    // We did not start the traversal which is a problem. This
                    // shouldn't happen, but if it does then we could overwrite
                    // existing regions.
                    return;
                }
            }
        }

        // Iterate apps and run an operation if one is pending.
        for app in self.apps.iter() {
            let processid = app.processid();
            let started = app.enter(|app, kernel_data| {
                match app.pending_operation {
                    Some(nvm_command) => {
                        if app.region.is_none() {
                            // This app needs its region allocated.
                            self.allocate_app_region(processid).is_ok()
                        } else {
                            let res = self.handle_syscall(nvm_command, processid, app, kernel_data);
                            match res {
                                Ok(started_operation) => started_operation,
                                Err(e) => {
                                    app.pending_operation = None;
                                    kernel_data
                                        .schedule_upcall(
                                            nvm_command.upcall(),
                                            (into_statuscode(Err(e)), 0, 0),
                                        )
                                        .ok();

                                    false
                                }
                            }
                        }
                    }
                    None => false,
                }
            });
            if started {
                break;
            }
        }
    }

    fn handle_syscall(
        &self,
        command: NvmCommand,
        processid: ProcessId,
        app: &mut App,
        kernel_data: &kernel::grant::GrantKernelData,
    ) -> Result<bool, ErrorCode> {
        match command {
            NvmCommand::GetSize => {
                match app.region {
                    Some(region) => {
                        // clear pending syscall
                        app.pending_operation = None;
                        // signal app with the result
                        kernel_data
                            .schedule_upcall(
                                upcall::GET_SIZE_DONE,
                                (into_statuscode(Ok(())), region.length, 0),
                            )
                            .ok();
                        Ok(false)
                    }
                    None => Err(ErrorCode::NOMEM),
                }
            }

            NvmCommand::Read { offset: _ } | NvmCommand::Write { offset: _ } => {
                // Get the length of the correct allowed buffer.
                let allow_buf_len = match command {
                    NvmCommand::Read { offset: _ } => kernel_data
                        .get_readwrite_processbuffer(rw_allow::READ)
                        .map_or(0, |read| read.len()),
                    NvmCommand::Write { offset: _ } => kernel_data
                        .get_readonly_processbuffer(ro_allow::WRITE)
                        .map_or(0, |read| read.len()),
                    NvmCommand::GetSize => 0,
                };

                // Check that the matching allowed buffer exists.
                if allow_buf_len == 0 {
                    return Err(ErrorCode::RESERVE);
                }

                // Fail if the app doesn't have a region assigned to it.
                let Some(app_region) = &app.region else {
                    return Err(ErrorCode::NOMEM);
                };

                let command_offset = command.offset();

                self.check_userspace_access(command_offset, allow_buf_len, app_region)?;

                // Need to copy bytes if this is a write!
                if let NvmCommand::Write { offset: _ } = command {
                    let _ = kernel_data
                        .get_readonly_processbuffer(ro_allow::WRITE)
                        .and_then(|write| {
                            write.enter(|app_buffer| {
                                self.buffer.map(|kernel_buffer| {
                                    // Check that the internal buffer and
                                    // the buffer that was allowed are long
                                    // enough.
                                    let write_len = cmp::min(allow_buf_len, kernel_buffer.len());

                                    let d = &app_buffer[0..write_len];
                                    for (i, c) in kernel_buffer[0..write_len].iter_mut().enumerate()
                                    {
                                        *c = d[i].get();
                                    }
                                });
                            })
                        });
                }

                // Calculate where we want to actually read from or write to in
                // the physical storage. Note that the given offset for this
                // command is with respect to the app's region address space.
                // This means that userspace accesses start at 0, which is the
                // start of the app's region. `app_region.offset` is already an
                // absolute address, so no further adjustment is needed.
                let physical_address = app_region.offset + command_offset;

                let res = self
                    .buffer
                    .take()
                    .map_or(Err(ErrorCode::RESERVE), |buffer| {
                        // Check that the internal buffer and the buffer that was
                        // allowed are long enough.
                        let active_len_buf = cmp::min(allow_buf_len, buffer.len());

                        match command {
                            NvmCommand::Read { offset: _ } => self
                                .driver
                                .read(buffer, physical_address, active_len_buf)
                                .or(Err(ErrorCode::FAIL)),
                            NvmCommand::Write { offset: _ } => self
                                .driver
                                .write(buffer, physical_address, active_len_buf)
                                .or(Err(ErrorCode::FAIL)),
                            NvmCommand::GetSize => Err(ErrorCode::FAIL),
                        }
                    });
                match res {
                    Ok(()) => {
                        self.current_user.set(User::App { processid });
                        Ok(true)
                    }
                    Err(e) => Err(e),
                }
            }
        }
    }
}

/// This is the callback client for the underlying physical storage driver.
impl<const APP_REGION_SIZE: usize> hil::nonvolatile_storage::NonvolatileStorageClient
    for IsolatedNonvolatileStorage<'_, APP_REGION_SIZE>
{
    fn read_done(&self, buffer: &'static mut [u8], length: usize) {
        // Switch on which user of this capsule generated this callback.
        self.current_user.take().map(|user| {
            match user {
                User::RegionManager(state) => {
                    self.buffer.replace(buffer);
                    match state {
                        ManagerTask::DiscoverRegions(address) => {
                            let res = self.header_read_done(address);
                            match res {
                                Ok(addr) => match addr {
                                    Some(next_header_address) => {
                                        self.current_user.set(User::RegionManager(
                                            ManagerTask::DiscoverRegions(next_header_address),
                                        ));
                                    }
                                    None => {
                                        // We finished the scan of existing
                                        // regions. Now we can check the queue
                                        // to see if there is any work to be
                                        // done.
                                        self.check_queue();
                                    }
                                },
                                Err(_e) => {
                                    // Not clear what to do here.
                                    self.check_queue();
                                }
                            }
                        }
                        _ => {}
                    }
                }
                User::App { processid } => {
                    let _ = self.apps.enter(processid, move |app, kernel_data| {
                        // Need to copy in the contents of the buffer.
                        let read_len = kernel_data
                            .get_readwrite_processbuffer(rw_allow::READ)
                            .and_then(|read| {
                                read.mut_enter(|app_buffer| {
                                    let read_len = cmp::min(app_buffer.len(), length);

                                    let d = &app_buffer[0..read_len];
                                    for (i, c) in buffer[0..read_len].iter().enumerate() {
                                        d[i].set(*c);
                                    }
                                    read_len
                                })
                            })
                            .unwrap_or(0);

                        // Replace the buffer we used to do this read.
                        self.buffer.replace(buffer);

                        // clear pending syscall
                        app.pending_operation = None;
                        // And then signal the app.
                        kernel_data
                            .schedule_upcall(
                                upcall::READ_DONE,
                                (into_statuscode(Ok(())), read_len, 0),
                            )
                            .ok();
                    });

                    self.check_queue();
                }
            }
        });
    }

    fn write_done(&self, buffer: &'static mut [u8], length: usize) {
        // Replace the buffer we used to do this write.
        self.buffer.replace(buffer);

        // Switch on which user of this capsule generated this callback.
        self.current_user.take().map(|user| {
            match user {
                User::RegionManager(state) => {
                    match state {
                        ManagerTask::WriteHeader(processid, region) => {
                            // Now that we have written the header for the app
                            // we can store its region in its grant.
                            let _ = self.apps.enter(processid, |app, _kernel_data| {
                                // set region data in app's grant
                                app.region.replace(region);
                            });

                            // Update our metadata about where the next
                            // unallocated region is.
                            let next_header_addr = region.offset + region.length;
                            self.next_unallocated_region_header_address
                                .set(next_header_addr);

                            // Erase the userspace accessible content of the region
                            // before handing it off to an app.
                            let res = self.erase_region_content(region.offset, region.length);
                            match res {
                                Ok((next_erase_start, remaining_bytes)) => {
                                    self.current_user.set(User::RegionManager(
                                        // Need to pass on where the next erase
                                        // should start and how long it should be.
                                        ManagerTask::EraseRegion {
                                            processid,
                                            next_erase_start,
                                            remaining_bytes,
                                        },
                                    ));
                                }
                                Err(_e) => {
                                    // Not clear what to do here.
                                    self.current_user.clear();
                                    self.check_queue();
                                }
                            }
                        }
                        ManagerTask::EraseRegion {
                            processid,
                            next_erase_start,
                            remaining_bytes,
                        } => {
                            if remaining_bytes > 0 {
                                // We still have more to erase, so kick off
                                // another one where we left off.
                                let res =
                                    self.erase_region_content(next_erase_start, remaining_bytes);
                                match res {
                                    Ok((next_erase_start, remaining_bytes)) => {
                                        self.current_user.set(User::RegionManager(
                                            ManagerTask::EraseRegion {
                                                processid,
                                                next_erase_start,
                                                remaining_bytes,
                                            },
                                        ));
                                    }
                                    Err(_e) => {
                                        // Not clear what to do here.
                                        self.current_user.clear();
                                        self.check_queue();
                                    }
                                }
                            } else {
                                // Done erasing the entire region. Can go on
                                // with normal tasks.
                                self.current_user.clear();
                                self.check_queue();
                            }
                        }
                        _ => {}
                    }
                }
                User::App { processid } => {
                    let _ = self.apps.enter(processid, move |app, kernel_data| {
                        // clear pending syscall
                        app.pending_operation = None;
                        // Notify app that its write has completed.
                        kernel_data
                            .schedule_upcall(
                                upcall::WRITE_DONE,
                                (into_statuscode(Ok(())), length, 0),
                            )
                            .ok();
                    });
                    self.current_user.clear();
                    self.check_queue();
                }
            }
        });
    }
}

/// Provide an interface for userland.
impl<const APP_REGION_SIZE: usize> SyscallDriver
    for IsolatedNonvolatileStorage<'_, APP_REGION_SIZE>
{
    /// Command interface.
    ///
    /// Commands are selected by the lowest 8 bits of the first argument.
    ///
    /// ### `command_num`
    ///
    /// - `0`: Return Ok(()) if this driver is included on the platform.
    /// - `1`: Return the number of bytes available to each app.
    /// - `2`: Start a read from the nonvolatile storage.
    /// - `3`: Start a write to the nonvolatile storage.
    fn command(
        &self,
        command_num: usize,
        offset_lo: usize,
        offset_hi: usize,
        processid: ProcessId,
    ) -> CommandReturn {
        match command_num {
            0 => CommandReturn::success(),

            // For the get size, read, and write syscalls we need to first
            // initialize the app's isolated nonvolatile storage. This involves
            // searching the storage area for an existing region that belongs
            // to this app. If we don't find an existing region we allocate a
            // new one. Only once the initialization is complete can we service
            // the original syscall. So, we store the syscall data in the app's
            // grant and handle it when initialization finishes.
            1 | 2 | 3 => {
                // We want to handle both 64-bit and 32-bit platforms, but on
                // 32-bit platforms shifting `offset_hi` doesn't make sense.
                let offset: usize = usize32s_to_usize(offset_lo, offset_hi);
                let nvm_command = match command_num {
                    1 => NvmCommand::GetSize,
                    2 => NvmCommand::Read { offset },
                    3 => NvmCommand::Write { offset },
                    _ => return CommandReturn::failure(ErrorCode::NOSUPPORT),
                };

                // Enqueue the operation for the app.
                let res = self.enqueue_userspace_command(nvm_command, processid);
                match res {
                    Ok(()) => CommandReturn::success(),
                    Err(e) => CommandReturn::failure(e),
                }
            }
            _ => CommandReturn::failure(ErrorCode::NOSUPPORT),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
}
1138}