// virtio/devices/virtio_net.rs

1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2025.
4
5use core::cell::Cell;
6
7use kernel::hil::ethernet::{EthernetAdapterDatapath, EthernetAdapterDatapathClient};
8use kernel::utilities::cells::OptionalCell;
9use kernel::utilities::registers::{register_bitfields, LocalRegisterCopy};
10use kernel::ErrorCode;
11
12use super::super::devices::{VirtIODeviceDriver, VirtIODeviceType};
13use super::super::queues::split_queue::{SplitVirtqueue, SplitVirtqueueClient, VirtqueueBuffer};
14
// Feature bits offered by a VirtIO network device during feature
// negotiation (a 64-bit feature word). Only VIRTIO_NET_F_MAC is acted
// upon by this driver (see `negotiate_features`); the remaining bits
// are defined for completeness.
register_bitfields![u64,
    VirtIONetFeatures [
        // Checksum offload, device (host) and guest side:
        VirtIONetFCsum OFFSET(0) NUMBITS(1),
        VirtIONetFGuestCsum OFFSET(1) NUMBITS(1),
        VirtIONetFCtrlGuestOffloads OFFSET(2) NUMBITS(1),
        // Device reports a maximum MTU:
        VirtIONetFMtu OFFSET(3) NUMBITS(1),
        // Device has a MAC address; required by this driver:
        VirtIONetFMac OFFSET(5) NUMBITS(1),
        // Segmentation offload (TSO/ECN/UFO), guest receive side:
        VirtIONetFGuestTso4 OFFSET(7) NUMBITS(1),
        VirtIONetFGuestTso6 OFFSET(8) NUMBITS(1),
        VirtIONetFGuestEcn OFFSET(9) NUMBITS(1),
        VirtIONetFGuestUfo OFFSET(10) NUMBITS(1),
        // Segmentation offload, device (host) transmit side:
        VirtIONetFHostTso4 OFFSET(11) NUMBITS(1),
        VirtIONetFHostTso6 OFFSET(12) NUMBITS(1),
        VirtIONetFHostEcn OFFSET(13) NUMBITS(1),
        VirtIONetFHostUfo OFFSET(14) NUMBITS(1),
        // Device can merge receive buffers:
        VirtIONetFMrgRxbuf OFFSET(15) NUMBITS(1),
        // Configuration status field is available:
        VirtIONetFStatus OFFSET(16) NUMBITS(1),
        // Control virtqueue and its sub-features:
        VirtIONetFCtrlVq OFFSET(17) NUMBITS(1),
        VirtIONetFCtrlRx OFFSET(18) NUMBITS(1),
        VirtIONetFCtrlVlan OFFSET(19) NUMBITS(1),
        VirtIONetFGuestAnnounce OFFSET(21) NUMBITS(1),
        VirtIONetFMq OFFSET(22) NUMBITS(1),
        VirtIONetFCtrlMacAddr OFFSET(23) NUMBITS(1),
        // NOTE(review): bits 61/62 lie in the range reserved for
        // future feature-negotiation extensions. It is unclear whether
        // the transport passes these bits through to the driver at all
        // -- confirm against the VirtIO transport implementation:
        VirtIONetFRscExt OFFSET(61) NUMBITS(1),
        VirtIONetFStandby OFFSET(62) NUMBITS(1),
    ]
];
44
/// VirtIO network card device driver.
///
/// Bridges a VirtIO network device, driven through a pair of split
/// virtqueues, to the `EthernetAdapterDatapath` HIL.
pub struct VirtIONet<'a> {
    /// Receive virtqueue. Buffer chains on it hold two buffers: the
    /// virtio-net header, followed by the frame payload.
    rxqueue: &'a SplitVirtqueue<'static, 'static, 2>,
    /// Transmit virtqueue, with the same two-buffer chain layout as
    /// `rxqueue`.
    txqueue: &'a SplitVirtqueue<'static, 'static, 2>,
    /// 12-byte virtio-net header buffer for transmissions. Empty while
    /// a transmission is in flight (the buffer is then owned by the
    /// virtqueue), which doubles as the "device busy" indicator.
    tx_header: OptionalCell<&'static mut [u8; 12]>,
    /// `(frame length, transmission identifier)` of the in-flight
    /// transmission, reported back in `transmit_frame_done`.
    tx_frame_info: Cell<(u16, usize)>,
    /// Receive-side virtio-net header buffer, present while owned by
    /// the driver (not inserted into the receive virtqueue).
    rx_header: OptionalCell<&'static mut [u8]>,
    /// Receive-side frame payload buffer, present while owned by the
    /// driver (not inserted into the receive virtqueue).
    rx_buffer: OptionalCell<&'static mut [u8]>,
    /// Client to deliver received frames and transmit-done events to.
    client: OptionalCell<&'a dyn EthernetAdapterDatapathClient>,
    /// Whether frame reception is currently enabled.
    rx_enabled: Cell<bool>,
}
55
56impl<'a> VirtIONet<'a> {
57    pub fn new(
58        txqueue: &'a SplitVirtqueue<'static, 'static, 2>,
59        tx_header: &'static mut [u8; 12],
60        rxqueue: &'a SplitVirtqueue<'static, 'static, 2>,
61        rx_header: &'static mut [u8],
62        rx_buffer: &'static mut [u8],
63    ) -> VirtIONet<'a> {
64        txqueue.enable_used_callbacks();
65        rxqueue.enable_used_callbacks();
66
67        VirtIONet {
68            rxqueue,
69            txqueue,
70            tx_header: OptionalCell::new(tx_header),
71            tx_frame_info: Cell::new((0, 0)),
72            rx_header: OptionalCell::new(rx_header),
73            rx_buffer: OptionalCell::new(rx_buffer),
74            client: OptionalCell::empty(),
75            rx_enabled: Cell::new(false),
76        }
77    }
78
79    fn reinsert_virtqueue_receive_buffer(&self) {
80        // Don't reinsert receive buffer when reception is disabled. The buffers
81        // will be reinserted on the next call to `enable_receive`:
82        if !self.rx_enabled.get() {
83            return;
84        }
85
86        // Place the receive buffers into the device's VirtQueue
87        if let Some(rx_buffer) = self.rx_buffer.take() {
88            let rx_buffer_len = rx_buffer.len();
89
90            let mut buffer_chain = [
91                Some(VirtqueueBuffer {
92                    buf: self.rx_header.take().unwrap(),
93                    len: 12,
94                    device_writeable: true,
95                }),
96                Some(VirtqueueBuffer {
97                    buf: rx_buffer,
98                    len: rx_buffer_len,
99                    device_writeable: true,
100                }),
101            ];
102
103            self.rxqueue
104                .provide_buffer_chain(&mut buffer_chain)
105                .unwrap();
106        }
107    }
108}
109
110impl SplitVirtqueueClient<'static> for VirtIONet<'_> {
111    fn buffer_chain_ready(
112        &self,
113        queue_number: u32,
114        buffer_chain: &mut [Option<VirtqueueBuffer<'static>>],
115        bytes_used: usize,
116    ) {
117        if queue_number == self.rxqueue.queue_number().unwrap() {
118            // Received an Ethernet frame
119
120            let rx_header = buffer_chain[0].take().expect("No header buffer").buf;
121            // TODO: do something with the header
122            self.rx_header.replace(rx_header);
123
124            let rx_buffer = buffer_chain[1].take().expect("No rx content buffer").buf;
125
126            if self.rx_enabled.get() {
127                self.client
128                    .map(|client| client.received_frame(&rx_buffer[..(bytes_used - 12)], None));
129            }
130
131            self.rx_buffer.replace(rx_buffer);
132
133            // Re-run enable RX to provide the RX buffer chain back to the
134            // device (if reception is still enabled):
135            self.reinsert_virtqueue_receive_buffer();
136        } else if queue_number == self.txqueue.queue_number().unwrap() {
137            // Sent an Ethernet frame
138
139            let header_buf = buffer_chain[0].take().expect("No header buffer").buf;
140            self.tx_header.replace(header_buf.try_into().unwrap());
141
142            let frame_buf = buffer_chain[1].take().expect("No frame buffer").buf;
143
144            let (frame_len, transmission_identifier) = self.tx_frame_info.get();
145
146            self.client.map(move |client| {
147                client.transmit_frame_done(
148                    Ok(()),
149                    frame_buf,
150                    frame_len,
151                    transmission_identifier,
152                    None,
153                )
154            });
155        } else {
156            panic!("Callback from unknown queue");
157        }
158    }
159}
160
161impl VirtIODeviceDriver for VirtIONet<'_> {
162    fn negotiate_features(&self, offered_features: u64) -> Option<u64> {
163        let offered_features =
164            LocalRegisterCopy::<u64, VirtIONetFeatures::Register>::new(offered_features);
165        let mut negotiated_features = LocalRegisterCopy::<u64, VirtIONetFeatures::Register>::new(0);
166
167        if offered_features.is_set(VirtIONetFeatures::VirtIONetFMac) {
168            // VIRTIO_NET_F_MAC offered, which means that the device has a MAC
169            // address. Accept this feature, which is required for this driver
170            // for now.
171            negotiated_features.modify(VirtIONetFeatures::VirtIONetFMac::SET);
172        } else {
173            return None;
174        }
175
176        // TODO: QEMU doesn't offer this, but don't we need it? Does QEMU
177        // implicitly provide the feature but not offer it? Find out!
178        // if offered_features & (1 << 15) != 0 {
179        //     // VIRTIO_NET_F_MRG_RXBUF
180        //     //
181        //     // accept
182        //     negotiated_features |= 1 << 15;
183        // } else {
184        //     panic!("Missing NET_F_MRG_RXBUF");
185        // }
186
187        // Ignore everything else
188        Some(negotiated_features.get())
189    }
190
191    fn device_type(&self) -> VirtIODeviceType {
192        VirtIODeviceType::NetworkCard
193    }
194}
195
196impl<'a> EthernetAdapterDatapath<'a> for VirtIONet<'a> {
197    fn set_client(&self, client: &'a dyn EthernetAdapterDatapathClient) {
198        self.client.set(client);
199    }
200
201    fn enable_receive(&self) {
202        // Enable receive callbacks:
203        self.rx_enabled.set(true);
204
205        // Attempt to reinsert any driver-owned receive buffers into the receive
206        // queues. This will be a nop if reception was already enabled before
207        // this call:
208        self.reinsert_virtqueue_receive_buffer();
209    }
210
211    fn disable_receive(&self) {
212        // Disable receive callbacks:
213        self.rx_enabled.set(false);
214
215        // We don't "steal" any receive buffers out of the virtqueue, but the
216        // above flag will avoid reinserting buffers into the VirtQueue until
217        // reception is enabled again:
218    }
219
220    fn transmit_frame(
221        &self,
222        frame_buffer: &'static mut [u8],
223        len: u16,
224        transmission_identifier: usize,
225    ) -> Result<(), (ErrorCode, &'static mut [u8])> {
226        // Try to get a hold of the header buffer
227        //
228        // Otherwise, the device is currently busy transmissing a buffer
229        //
230        // TODO: Implement simultaneous transmissions
231        let mut frame_queue_buf = Some(VirtqueueBuffer {
232            buf: frame_buffer,
233            len: len as usize,
234            device_writeable: false,
235        });
236
237        let header_buf = self
238            .tx_header
239            .take()
240            .ok_or(ErrorCode::BUSY)
241            .map_err(|ret| (ret, frame_queue_buf.take().unwrap().buf))?;
242
243        // Write the header
244        //
245        // TODO: Can this be done more elegantly using a struct of registers?
246        header_buf[0] = 0; // flags -> we don't want checksumming
247        header_buf[1] = 0; // gso -> no checksumming or fragmentation
248        header_buf[2] = 0; // hdr_len_low
249        header_buf[3] = 0; // hdr_len_high
250        header_buf[4] = 0; // gso_size
251        header_buf[5] = 0; // gso_size
252        header_buf[6] = 0; // csum_start
253        header_buf[7] = 0; // csum_start
254        header_buf[8] = 0; // csum_offset
255        header_buf[9] = 0; // csum_offsetb
256        header_buf[10] = 0; // num_buffers
257        header_buf[11] = 0; // num_buffers
258
259        let mut buffer_chain = [
260            Some(VirtqueueBuffer {
261                buf: header_buf,
262                len: 12,
263                device_writeable: false,
264            }),
265            frame_queue_buf.take(),
266        ];
267
268        self.tx_frame_info.set((len, transmission_identifier));
269
270        self.txqueue
271            .provide_buffer_chain(&mut buffer_chain)
272            .map_err(move |ret| (ret, buffer_chain[1].take().unwrap().buf))?;
273
274        Ok(())
275    }
276}