capsules_extra/net/udp/driver.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! UDP userspace interface for transmit and receive.
6//!
7//! Implements a userspace interface for sending and receiving UDP messages.
8//! Processes use this driver to send UDP packets from a common interface
9//! and bind to UDP ports for receiving packets.
10//! Also exposes a list of interface addresses to the application (currently
11//! hard-coded).
12
13use crate::net::ipv6::ip_utils::IPAddr;
14use crate::net::network_capabilities::NetworkCapability;
15use crate::net::stream::encode_u16;
16use crate::net::stream::encode_u8;
17use crate::net::stream::SResult;
18use crate::net::udp::udp_port_table::{PortQuery, UdpPortManager};
19use crate::net::udp::udp_recv::UDPRecvClient;
20use crate::net::udp::udp_send::{UDPSendClient, UDPSender};
21use crate::net::util::host_slice_to_u16;
22
23use core::cell::Cell;
24use core::mem::size_of;
25use core::{cmp, mem};
26
27use kernel::capabilities::UdpDriverCapability;
28use kernel::debug;
29use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount};
30use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
31use kernel::syscall::{CommandReturn, SyscallDriver};
32use kernel::utilities::cells::MapCell;
33use kernel::utilities::leasable_buffer::SubSliceMut;
34use kernel::{ErrorCode, ProcessId};
35
36use capsules_core::driver;
/// Syscall driver number, shared with userspace.
pub const DRIVER_NUM: usize = driver::NUM::Udp as usize;
38
/// IDs for subscribed upcalls.
mod upcall {
    /// Callback for when packet is received. If no port has been bound, return
    /// `RESERVE` to indicate that port binding is a prerequisite to
    /// reception.
    pub const PACKET_RECEIVED: usize = 0;
    /// Callback for when packet is transmitted. Notably, this callback receives
    /// the result of the send_done callback from udp_send.rs, which does not
    /// currently pass information regarding whether packets were acked at the
    /// link layer.
    pub const PACKET_TRANSMITTED: usize = 1;
    /// Number of upcalls.
    pub const COUNT: u8 = 2;
}
53
/// IDs for read-only allow buffers.
mod ro_allow {
    /// Write buffer. Contains the UDP payload to be transmitted. Returns SIZE
    /// if the passed buffer is too long, and NOSUPPORT if an invalid
    /// `allow_num` is passed.
    pub const WRITE: usize = 0;
    /// The number of read-only allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}
63
/// IDs for read-write allow buffers.
mod rw_allow {
    /// Read buffer. Will contain the received payload.
    pub const READ: usize = 0;
    /// Config buffer. Used to contain miscellaneous data associated with some
    /// commands, namely source/destination addresses and ports.
    pub const CFG: usize = 1;
    /// Rx config buffer. Used to contain source/destination addresses and ports
    /// for receives (separate from `CFG` because receives may be waiting for an
    /// incoming packet asynchronously).
    pub const RX_CFG: usize = 2;
    /// The number of read-write allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 3;
}
78
/// One endpoint of a UDP flow: an IPv6 address paired with a UDP port.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct UDPEndpoint {
    /// IPv6 address of the endpoint.
    addr: IPAddr,
    /// UDP port number of the endpoint.
    port: u16,
}
84
85impl UDPEndpoint {
86 /// This function serializes the `UDPEndpoint` into the provided buffer.
87 ///
88 /// # Arguments
89 ///
90 /// - `buf` - A mutable buffer to serialize the `UDPEndpoint` into
91 /// - `offset` - The current offset into the provided buffer
92 ///
93 /// # Return Value
94 ///
95 /// This function returns the new offset into the buffer wrapped in an
96 /// SResult.
97 pub fn encode(&self, buf: &mut [u8], offset: usize) -> SResult<usize> {
98 stream_len_cond!(buf, size_of::<UDPEndpoint>() + offset);
99
100 let mut off = offset;
101 for i in 0..16 {
102 off = enc_consume!(buf, off; encode_u8, self.addr.0[i]);
103 }
104 off = enc_consume!(buf, off; encode_u16, self.port);
105 stream_done!(off, off);
106 }
107
108 /// This function checks if the UDPEndpoint specified is the 0 address + 0 port.
109 pub fn is_zero(&self) -> bool {
110 self.addr.is_unspecified() && self.port == 0
111 }
112}
113
/// Per-process state stored in this driver's grant region.
#[derive(Default)]
pub struct App {
    // A transmission queued by this app but not yet serviced. The array is
    // `[source, destination]` endpoints (see the tx cfg parsing in `command`
    // number 2 and the use in `perform_tx_sync`).
    pending_tx: Option<[UDPEndpoint; 2]>,
    // The single addr/port pair this app has bound to for reception (and the
    // only source it may transmit from), if any.
    bound_port: Option<UDPEndpoint>,
}
119
/// Userspace UDP driver state.
///
/// Mediates between processes and the kernel UDP stack: copies app payloads
/// into `kernel_buffer` for transmission, tracks one bound port per app, and
/// dispatches received datagrams to bound apps.
#[allow(dead_code)]
pub struct UDPDriver<'a> {
    /// UDP sender
    sender: &'a dyn UDPSender<'a>,

    /// Grant of apps that use this radio driver.
    apps: Grant<
        App,
        UpcallCount<{ upcall::COUNT }>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,
    /// ID of app whose transmission request is being processed. `None` when
    /// the driver is idle.
    current_app: Cell<Option<ProcessId>>,

    /// List of IP Addresses of the interfaces on the device
    interface_list: &'static [IPAddr],

    /// Maximum length payload that an app can transmit via this driver
    max_tx_pyld_len: usize,

    /// UDP bound port table (manages kernel bindings)
    port_table: &'static UdpPortManager,

    /// Kernel-owned staging buffer that app payloads are copied into before
    /// being handed to `sender`; taken (empty) while a send is in flight and
    /// restored by `send_done` or on error.
    kernel_buffer: MapCell<SubSliceMut<'static, u8>>,

    /// Capability authorizing use of the driver-level send path.
    driver_send_cap: &'static dyn UdpDriverCapability,

    /// Network capability forwarded to the UDP send path.
    net_cap: &'static NetworkCapability,
}
150
impl<'a> UDPDriver<'a> {
    /// Constructs a new `UDPDriver`.
    ///
    /// # Arguments
    ///
    /// - `sender` - UDP sender used to queue outgoing packets
    /// - `grant` - grant region holding per-app state, upcalls, and allow
    ///   buffers
    /// - `interface_list` - IPv6 addresses of the local interfaces
    /// - `max_tx_pyld_len` - maximum payload length an app may transmit
    /// - `port_table` - kernel-wide UDP port binding table
    /// - `kernel_buffer` - staging buffer app payloads are copied into
    /// - `driver_send_cap` - capability authorizing driver-level sends
    /// - `net_cap` - network capability forwarded to the send path
    pub fn new(
        sender: &'a dyn UDPSender<'a>,
        grant: Grant<
            App,
            UpcallCount<{ upcall::COUNT }>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        interface_list: &'static [IPAddr],
        max_tx_pyld_len: usize,
        port_table: &'static UdpPortManager,
        kernel_buffer: SubSliceMut<'static, u8>,
        driver_send_cap: &'static dyn UdpDriverCapability,
        net_cap: &'static NetworkCapability,
    ) -> UDPDriver<'a> {
        UDPDriver {
            sender,
            apps: grant,
            current_app: Cell::new(None),
            interface_list,
            max_tx_pyld_len,
            port_table,
            kernel_buffer: MapCell::new(kernel_buffer),
            driver_send_cap,
            net_cap,
        }
    }

    /// If the driver is currently idle and there are pending transmissions,
    /// pick an app with a pending transmission and return its `ProcessId`.
    fn get_next_tx_if_idle(&self) -> Option<ProcessId> {
        if self.current_app.get().is_some() {
            // Tx already in progress
            return None;
        }
        let mut pending_app = None;
        // Scan apps in grant-iteration order; the first app found with a
        // pending transmission wins. This ordering permits starvation (see
        // the `command` documentation).
        for app in self.apps.iter() {
            let processid = app.processid();
            app.enter(|app, _| {
                if app.pending_tx.is_some() {
                    pending_app = Some(processid);
                }
            });
            if pending_app.is_some() {
                break;
            }
        }
        pending_app
    }

    /// Performs `processid`'s pending transmission asynchronously. If the
    /// transmission is not successful, the error is returned to the app via its
    /// `tx_callback`. Assumes that the driver is currently idle and the app has
    /// a pending transmission.
    #[inline]
    fn perform_tx_async(&self, processid: ProcessId) {
        let result = self.perform_tx_sync(processid);
        if result != Ok(()) {
            // Synchronous failure: no send_done() will arrive, so report the
            // error through the transmit upcall here.
            let _ = self.apps.enter(processid, |_app, upcalls| {
                let _ = upcalls.schedule_upcall(
                    upcall::PACKET_TRANSMITTED,
                    (kernel::errorcode::into_statuscode(result), 0, 0),
                );
            });
        }
    }

    /// Performs `processid`'s pending transmission synchronously. The result is
    /// returned immediately to the app. Assumes that the driver is currently
    /// idle and the app has a pending transmission.
    #[inline]
    fn perform_tx_sync(&self, processid: ProcessId) -> Result<(), ErrorCode> {
        self.apps.enter(processid, |app, kernel_data| {
            // Consume the pending request up front; if the send below fails,
            // the request is dropped rather than retried.
            let addr_ports = match app.pending_tx.take() {
                Some(pending_tx) => pending_tx,
                None => {
                    return Ok(());
                }
            };
            // `pending_tx` is stored as [source, destination].
            let dst_addr = addr_ports[1].addr;
            let dst_port = addr_ports[1].port;
            let src_port = addr_ports[0].port;

            // Send UDP payload. Copy payload into packet buffer held by this driver, then queue
            // it on the udp_mux.
            let result = kernel_data
                .get_readonly_processbuffer(ro_allow::WRITE)
                .and_then(|write| {
                    write.enter(|payload| {
                        // NOMEM if the kernel buffer is currently out on loan
                        // (another send in flight).
                        self.kernel_buffer.take().map_or(
                            Err(ErrorCode::NOMEM),
                            |mut kernel_buffer| {
                                if payload.len() > kernel_buffer.len() {
                                    // Payload too large: put the buffer back
                                    // and report SIZE.
                                    self.kernel_buffer.replace(kernel_buffer);
                                    return Err(ErrorCode::SIZE);
                                }
                                // Copy the app payload in and shrink the
                                // slice window to exactly the payload length.
                                payload.copy_to_slice(&mut kernel_buffer[0..payload.len()]);
                                kernel_buffer.slice(0..payload.len());
                                match self.sender.driver_send_to(
                                    dst_addr,
                                    dst_port,
                                    src_port,
                                    kernel_buffer,
                                    self.driver_send_cap,
                                    self.net_cap,
                                ) {
                                    Ok(()) => Ok(()),
                                    Err(mut buf) => {
                                        // Send refused: the buffer is handed
                                        // back; restore its full window and
                                        // stash it for the next attempt.
                                        buf.reset();
                                        self.kernel_buffer.replace(buf);
                                        Err(ErrorCode::FAIL)
                                    }
                                }
                            },
                        )
                    })
                })
                .unwrap_or(Err(ErrorCode::NOMEM));
            if result == Ok(()) {
                // Mark the driver busy until send_done() returns the buffer.
                self.current_app.set(Some(processid));
            }
            result
        })?
    }

    /// Schedule the next transmission if there is one pending. Performs the
    /// transmission eventually, returning any errors via asynchronous callbacks.
    #[inline]
    #[allow(dead_code)]
    fn do_next_tx_queued(&self) {
        self.get_next_tx_if_idle()
            .map(|processid| self.perform_tx_async(processid));
    }

    /// Schedule the next transmission if there is one pending. If the next
    /// transmission happens to be the one that was just queued, then the
    /// transmission is immediate. Hence, errors must be returned immediately.
    /// On the other hand, if it is some other app, then return any errors via
    /// callbacks.
    #[inline]
    fn do_next_tx_immediate(&self, new_processid: ProcessId) -> Result<u32, ErrorCode> {
        self.get_next_tx_if_idle().map_or(Ok(0), |processid| {
            if processid == new_processid {
                let sync_result = self.perform_tx_sync(processid);
                if sync_result == Ok(()) {
                    Ok(1) //Indicates packet passed to radio
                } else {
                    Err(ErrorCode::try_from(sync_result).unwrap())
                }
            } else {
                self.perform_tx_async(processid);
                Ok(0) //indicates async transmission
            }
        })
    }

    /// Parses a serialized `UDPEndpoint` out of `buf`: the IPv6 address bytes
    /// followed by the port. Returns `None` (after logging) if `buf` is not
    /// exactly `size_of::<UDPEndpoint>()` bytes long.
    ///
    /// NOTE(review): assumes `UDPEndpoint` has no padding, i.e.
    /// `size_of::<UDPEndpoint>() == size_of::<IPAddr>() + 2` — confirm.
    #[inline]
    fn parse_ip_port_pair(&self, buf: &[u8]) -> Option<UDPEndpoint> {
        if buf.len() != size_of::<UDPEndpoint>() {
            debug!(
                "[parse] len is {:?}, not {:?} as expected",
                buf.len(),
                size_of::<UDPEndpoint>()
            );
            None
        } else {
            // Leading bytes are the address; the remainder is the port.
            let (a, p) = buf.split_at(size_of::<IPAddr>());
            let mut addr = IPAddr::new();
            addr.0.copy_from_slice(a);

            let pair = UDPEndpoint {
                addr,
                port: host_slice_to_u16(p),
            };
            Some(pair)
        }
    }
}
330
impl SyscallDriver for UDPDriver<'_> {
    /// UDP control
    ///
    /// ### `command_num`
    ///
    /// - `0`: Driver existence check.
    /// - `1`: Get the interface list app_cfg (out): 16 * `n` bytes: the list of
    ///   interface IPv6 addresses, length limited by `app_cfg` length. Returns
    ///   INVAL if the cfg buffer is the wrong size, or not available.
    /// - `2`: Transmit payload.
    ///   - Returns BUSY if this process already has a pending tx.
    ///   - Returns INVAL if no valid buffer has been loaded into the write
    ///     buffer, or if the config buffer is the wrong length, or if the
    ///     destination and source port/address pairs cannot be parsed.
    ///   - Otherwise, returns the result of do_next_tx_immediate(). Notably, a
    ///     successful transmit can produce two different success values. If
    ///     success is returned, this simply means that the packet was queued.
    ///     In this case, the app still needs to wait for a callback to
    ///     check if any errors occurred before the packet was passed to the
    ///     radio. However, if Success_U32 is returned with value 1, this means
    ///     the packet was successfully passed to the radio without any errors,
    ///     which tells the userland application that it does not need to wait
    ///     for a callback to check if any errors occurred while the packet was
    ///     being passed down to the radio. Any successful return value
    ///     indicates that the app should wait for a send_done() callback before
    ///     attempting to queue another packet. Currently, only will transmit if
    ///     the app has bound to the port passed in the tx_cfg buf as the source
    ///     address.
    ///   - If no port is bound, returns RESERVE,
    ///   - if it tries to send on a port other than the port which is bound,
    ///     returns INVAL. Notably, the current transmit implementation
    ///     allows for starvation: an app with a lower app id can send
    ///     constantly and starve an app with a later ID.
    /// - `3`: Bind to the address in rx_cfg. Returns Ok(()) if that addr/port
    ///   combo is free, returns INVAL if the address requested is not a local
    ///   interface, or if the port requested is 0. Returns BUSY if that port is
    ///   already bound to by another app. This command should be called after
    ///   allow() is called on the rx_cfg buffer, and before subscribe() is used
    ///   to set up the recv callback. Additionally, apps can only send on ports
    ///   after they have bound to said port. If this command is called and the
    ///   address in rx_cfg is the unspecified address with port 0, this command
    ///   will reset the option containing the bound port to None. Notably, the
    ///   current implementation of this only allows for each app to bind to a
    ///   single port at a time, as such an implementation conserves memory (and
    ///   is similar to the approach applied by TinyOS and Riot).
    /// - `4`: Returns the maximum payload that can be transmitted by apps using
    ///   this driver. This represents the size of the payload buffer in the
    ///   kernel. Apps can use this syscall to ensure they do not attempt to
    ///   send too-large messages.

    fn command(
        &self,
        command_num: usize,
        arg1: usize,
        _: usize,
        processid: ProcessId,
    ) -> CommandReturn {
        match command_num {
            0 => CommandReturn::success(),

            // Writes the requested number of network interface addresses
            // `arg1`: number of interfaces requested that will fit into the buffer
            1 => {
                self.apps
                    .enter(processid, |_, kernel_data| {
                        kernel_data
                            .get_readwrite_processbuffer(rw_allow::CFG)
                            .and_then(|cfg| {
                                cfg.mut_enter(|cfg| {
                                    // The cfg buffer must be sized for exactly
                                    // `arg1` addresses.
                                    // NOTE(review): `arg1 * size_of::<IPAddr>()`
                                    // could wrap for huge arg1 in release
                                    // builds — confirm acceptable.
                                    if cfg.len() != arg1 * size_of::<IPAddr>() {
                                        return CommandReturn::failure(ErrorCode::INVAL);
                                    }
                                    let n_ifaces_to_copy =
                                        cmp::min(arg1, self.interface_list.len());
                                    let iface_size = size_of::<IPAddr>();
                                    for i in 0..n_ifaces_to_copy {
                                        cfg[i * iface_size..(i + 1) * iface_size]
                                            .copy_from_slice(&self.interface_list[i].0);
                                    }
                                    // Returns total number of interfaces
                                    CommandReturn::success_u32(self.interface_list.len() as u32)
                                })
                            })
                            .unwrap_or(CommandReturn::failure(ErrorCode::INVAL))
                    })
                    .unwrap_or_else(|err| CommandReturn::failure(err.into()))
            }

            // Transmits UDP packet stored in tx_buf
            2 => {
                let res = self
                    .apps
                    .enter(processid, |app, kernel_data| {
                        if app.pending_tx.is_some() {
                            // Cannot support more than one pending tx per process.
                            return Err(ErrorCode::BUSY);
                        }
                        if app.bound_port.is_none() {
                            // Currently, apps need to bind to a port before they can send from said port
                            return Err(ErrorCode::RESERVE);
                        }
                        // Parse the cfg buffer as [src endpoint | dst endpoint]
                        // and require src to match the app's bound port.
                        let next_tx = kernel_data
                            .get_readwrite_processbuffer(rw_allow::CFG)
                            .and_then(|cfg| {
                                cfg.enter(|cfg| {
                                    if cfg.len() != 2 * size_of::<UDPEndpoint>() {
                                        return None;
                                    }

                                    let mut tmp_cfg_buffer: [u8; size_of::<UDPEndpoint>() * 2] =
                                        [0; size_of::<UDPEndpoint>() * 2];
                                    cfg.copy_to_slice(&mut tmp_cfg_buffer);

                                    if let (Some(dst), Some(src)) = (
                                        self.parse_ip_port_pair(
                                            &tmp_cfg_buffer[size_of::<UDPEndpoint>()..],
                                        ),
                                        self.parse_ip_port_pair(
                                            &tmp_cfg_buffer[..size_of::<UDPEndpoint>()],
                                        ),
                                    ) {
                                        if Some(src) == app.bound_port {
                                            Some([src, dst])
                                        } else {
                                            None
                                        }
                                    } else {
                                        None
                                    }
                                })
                            })
                            .unwrap_or(None);
                        if next_tx.is_none() {
                            return Err(ErrorCode::INVAL);
                        }
                        app.pending_tx = next_tx;
                        Ok(())
                    })
                    .unwrap_or_else(|err| Err(err.into()));
                match res {
                    Ok(()) => self
                        .do_next_tx_immediate(processid)
                        .map_or_else(CommandReturn::failure, CommandReturn::success_u32),
                    Err(e) => CommandReturn::failure(e),
                }
            }
            3 => {
                let err = self
                    .apps
                    .enter(processid, |app, kernel_data| {
                        // Move UDPEndpoint into udp.rs?
                        // The requested binding is the second endpoint in
                        // rx_cfg (the destination half).
                        let requested_addr_opt = kernel_data
                            .get_readwrite_processbuffer(rw_allow::RX_CFG)
                            .and_then(|rx_cfg| {
                                rx_cfg.enter(|cfg| {
                                    if cfg.len() != 2 * mem::size_of::<UDPEndpoint>() {
                                        None
                                    } else {
                                        let mut tmp_endpoint: [u8; mem::size_of::<UDPEndpoint>()] =
                                            [0; mem::size_of::<UDPEndpoint>()];
                                        cfg[mem::size_of::<UDPEndpoint>()..]
                                            .copy_to_slice(&mut tmp_endpoint);

                                        self.parse_ip_port_pair(&tmp_endpoint)
                                    }
                                })
                            })
                            .unwrap_or(None);
                        requested_addr_opt.map_or(Err(Err(ErrorCode::INVAL)), |requested_addr| {
                            // If zero address, close any already bound socket
                            if requested_addr.is_zero() {
                                app.bound_port = None;
                                return Ok(None);
                            }
                            // Check that requested addr is a local interface
                            let mut requested_is_local = false;
                            for i in 0..self.interface_list.len() {
                                if requested_addr.addr == self.interface_list[i] {
                                    requested_is_local = true;
                                }
                            }
                            if !requested_is_local {
                                return Err(Err(ErrorCode::INVAL));
                            }
                            Ok(Some(requested_addr))
                        })
                    })
                    .unwrap_or_else(|err| Err(err.into()));
                match err {
                    Ok(requested_addr_opt) => {
                        requested_addr_opt.map_or(CommandReturn::success(), |requested_addr| {
                            // Check bound ports in the kernel.
                            match self.port_table.is_bound(requested_addr.port) {
                                Ok(bound) => {
                                    if bound {
                                        CommandReturn::failure(ErrorCode::BUSY)
                                    } else {
                                        self.apps
                                            .enter(processid, |app, _| {
                                                // The requested addr is free and valid
                                                app.bound_port = Some(requested_addr);
                                                CommandReturn::success()
                                            })
                                            .unwrap_or_else(|err| {
                                                CommandReturn::failure(err.into())
                                            })
                                    }
                                }
                                Err(()) => CommandReturn::failure(ErrorCode::FAIL), //error in port table
                            }
                        })
                    }
                    Err(retcode) => CommandReturn::failure(retcode.try_into().unwrap()),
                }
            }
            4 => CommandReturn::success_u32(self.max_tx_pyld_len as u32),
            _ => CommandReturn::failure(ErrorCode::NOSUPPORT),
        }
    }

    /// Allocates this driver's grant region for `processid` by entering it.
    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
}
555
556impl UDPSendClient for UDPDriver<'_> {
557 fn send_done(&self, result: Result<(), ErrorCode>, mut dgram: SubSliceMut<'static, u8>) {
558 // Replace the returned kernel buffer. Now we can send the next msg.
559 dgram.reset();
560 self.kernel_buffer.replace(dgram);
561 self.current_app.get().map(|processid| {
562 let _ = self.apps.enter(processid, |_app, upcalls| {
563 let _ = upcalls.schedule_upcall(
564 upcall::PACKET_TRANSMITTED,
565 (kernel::errorcode::into_statuscode(result), 0, 0),
566 );
567 });
568 });
569 self.current_app.set(None);
570 self.do_next_tx_queued();
571 }
572}
573
impl UDPRecvClient for UDPDriver<'_> {
    /// Delivers a received UDP datagram to every app bound to exactly
    /// (`dst_addr`, `dst_port`): copies the payload into the app's read
    /// buffer, schedules a `PACKET_RECEIVED` upcall, and writes the sender's
    /// endpoint into the app's rx_cfg buffer.
    fn receive(
        &self,
        src_addr: IPAddr,
        dst_addr: IPAddr,
        src_port: u16,
        dst_port: u16,
        payload: &[u8],
    ) {
        self.apps.each(|_, app, kernel_data| {
            if app.bound_port.is_some() {
                // Deliver only to apps whose binding matches this packet's
                // destination address AND port.
                let mut for_me = false;
                app.bound_port.as_ref().map(|requested_addr| {
                    if requested_addr.addr == dst_addr && requested_addr.port == dst_port {
                        for_me = true;
                    }
                });
                if for_me {
                    let len = payload.len();
                    // Copy the payload into the app's read buffer. A missing
                    // buffer falls through as Ok(()) (best-effort); a
                    // too-small buffer drops the packet with SIZE.
                    let res = kernel_data
                        .get_readwrite_processbuffer(rw_allow::READ)
                        .and_then(|read| {
                            read.mut_enter(|rbuf| {
                                if rbuf.len() >= len {
                                    rbuf[..len].copy_from_slice(&payload[..len]);
                                    Ok(())
                                } else {
                                    Err(ErrorCode::SIZE) //packet does not fit
                                }
                            })
                        })
                        .unwrap_or(Ok(()));
                    if res.is_ok() {
                        // Write address of sender into rx_cfg so it can be read by client
                        let sender_addr = UDPEndpoint {
                            addr: src_addr,
                            port: src_port,
                        };
                        // NOTE(review): the upcall is scheduled before rx_cfg
                        // is populated below; this relies on the upcall being
                        // delivered only after this function returns — confirm.
                        let _ = kernel_data.schedule_upcall(upcall::PACKET_RECEIVED, (len, 0, 0));
                        const CFG_LEN: usize = 2 * size_of::<UDPEndpoint>();
                        let _ = kernel_data
                            .get_readwrite_processbuffer(rw_allow::RX_CFG)
                            .and_then(|rx_cfg| {
                                rx_cfg.mut_enter(|cfg| {
                                    if cfg.len() != CFG_LEN {
                                        return Err(ErrorCode::INVAL);
                                    }
                                    let mut tmp_cfg_buffer: [u8; CFG_LEN] = [0; CFG_LEN];
                                    // Encode result ignored: tmp_cfg_buffer is
                                    // CFG_LEN bytes, large enough for one
                                    // endpoint, so encoding cannot fail.
                                    sender_addr.encode(&mut tmp_cfg_buffer, 0);
                                    cfg.copy_from_slice(&tmp_cfg_buffer);
                                    Ok(())
                                })
                            })
                            .unwrap_or(Err(ErrorCode::INVAL));
                    }
                }
            }
        });
    }
}
634
635impl PortQuery for UDPDriver<'_> {
636 // Returns true if |port| is bound (on any iface), false otherwise.
637 fn is_bound(&self, port: u16) -> bool {
638 let mut port_bound = false;
639 for app in self.apps.iter() {
640 app.enter(|other_app, _| {
641 if other_app.bound_port.is_some() {
642 let other_addr_opt = other_app.bound_port;
643 let other_addr = other_addr_opt.unwrap(); // Unwrap fail = Missing other_addr
644 if other_addr.port == port {
645 port_bound = true;
646 }
647 }
648 });
649 }
650 port_bound
651 }
652}