virtio/devices/virtio_gpu/
mod.rs

1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2024.
4
5use core::cell::Cell;
6use core::ops::Range;
7
8use kernel::deferred_call::{DeferredCall, DeferredCallClient};
9use kernel::hil::screen::{Screen, ScreenClient, ScreenPixelFormat, ScreenRotation};
10use kernel::utilities::cells::{OptionalCell, TakeCell};
11use kernel::utilities::leasable_buffer::SubSliceMut;
12use kernel::ErrorCode;
13
14use super::super::devices::{VirtIODeviceDriver, VirtIODeviceType};
15use super::super::queues::split_queue::{SplitVirtqueue, SplitVirtqueueClient, VirtqueueBuffer};
16
17mod deferred_call;
18mod helpers;
19mod messages;
20
21use messages::{
22    ctrl_header::CtrlHeader,
23    resource_attach_backing::{MemEntry, ResourceAttachBackingReq, ResourceAttachBackingResp},
24    resource_create_2d::{ResourceCreate2DReq, ResourceCreate2DResp, VideoFormat},
25    resource_detach_backing::{ResourceDetachBackingReq, ResourceDetachBackingResp},
26    resource_flush::{ResourceFlushReq, ResourceFlushResp},
27    set_scanout::{SetScanoutReq, SetScanoutResp},
28    transfer_to_host_2d::{TransferToHost2DReq, TransferToHost2DResp},
29    Rect, VirtIOGPUReq, VirtIOGPUResp,
30};
31
32/// The total number of bytes occupied by a pixel in memory.
33///
34/// All [`VideoFormat`]s supported by VirtIO have a pixel stride of 4.
35pub const PIXEL_STRIDE: usize = 4;
36
37/// How many individual memory regions a backing buffer for a resouce
38/// can be split over.
39///
40/// This constant is used in calculating the maxium size of a
41/// [`ResourceAttachBackingReq`], which in turn is used to calculate
42/// the overall maximum request size issued by the [`VirtIOGPU`]
43/// driver.
44pub const MAX_ATTACH_BACKING_REQ_MEMORY_ENTRIES: usize = 1;
45
46/// Maximum size of any single request issued by the [`VirtIOGPU`]
47/// driver.
48pub const MAX_REQ_SIZE: usize = helpers::max(&[
49    ResourceCreate2DReq::ENCODED_SIZE,
50    ResourceAttachBackingReq::<{ MAX_ATTACH_BACKING_REQ_MEMORY_ENTRIES }>::ENCODED_SIZE,
51    SetScanoutReq::ENCODED_SIZE,
52    TransferToHost2DReq::ENCODED_SIZE,
53    ResourceFlushReq::ENCODED_SIZE,
54    ResourceDetachBackingReq::ENCODED_SIZE,
55]);
56
57/// Maximum size of any single response returned by the device to the
58/// [`VirtIOGPU`] driver.
59pub const MAX_RESP_SIZE: usize = helpers::max(&[
60    ResourceCreate2DResp::ENCODED_SIZE,
61    ResourceAttachBackingResp::ENCODED_SIZE,
62    SetScanoutResp::ENCODED_SIZE,
63    ResourceFlushResp::ENCODED_SIZE,
64    ResourceDetachBackingResp::ENCODED_SIZE,
65]);
66
/// State machine states for the [`VirtIOGPU`] driver.
///
/// The driver issues at most one control-queue command at a time; the
/// current state records which command (if any) is outstanding, so
/// that the virtqueue response callback knows which step to perform
/// next.
#[derive(Copy, Clone, Debug)]
pub enum VirtIOGPUState {
    /// `initialize` has not yet been called (or not yet issued its
    /// first command).
    Uninitialized,
    /// Init step 1: `RESOURCE_CREATE_2D` command outstanding.
    InitializingResourceCreate2D,
    /// Init step 2: `RESOURCE_ATTACH_BACKING` command outstanding.
    InitializingResourceAttachBacking,
    /// Init step 3: `SET_SCANOUT` command outstanding.
    InitializingSetScanout,
    /// Init step 4: `RESOURCE_DETACH_BACKING` command outstanding.
    InitializingResourceDetachBacking,
    /// No command outstanding; ready to accept client requests.
    Idle,
    // NOTE(review): presumably entered while handling
    // `Screen::set_write_frame` -- the handler is outside this view;
    // confirm against the `Screen` implementation.
    SettingWriteFrame,
    /// Draw step 1: attaching the client's write buffer as backing
    /// memory (`RESOURCE_ATTACH_BACKING` outstanding).
    DrawResourceAttachBacking,
    /// Draw step 2: `TRANSFER_TO_HOST_2D` command outstanding.
    DrawTransferToHost2D,
    /// Draw step 3: `RESOURCE_FLUSH` command outstanding.
    DrawResourceFlush,
    /// Draw step 4: detaching the client's write buffer again
    /// (`RESOURCE_DETACH_BACKING` outstanding).
    DrawResourceDetachBacking,
}
82
/// Driver for a VirtIO `GPUDevice`-class device.
///
/// Implements Tock's `Screen` HIL, and supports a single head with
/// the `ARGB_8888` pixel mode.
pub struct VirtIOGPU<'a, 'b> {
    // Misc driver state:
    //
    // `Screen` HIL client to deliver callbacks to:
    client: OptionalCell<&'a dyn ScreenClient>,
    // Current state-machine state (which command, if any, is
    // outstanding on the control queue):
    state: Cell<VirtIOGPUState>,
    deferred_call: DeferredCall,
    pending_deferred_call_mask: deferred_call::PendingDeferredCallMask,

    // VirtIO bus and buffers:
    //
    // The device's control virtqueue, used for all commands:
    control_queue: &'a SplitVirtqueue<'a, 'b, 2>,
    // Shared request / response buffers. Taken (left empty) while a
    // command is in flight on the virtqueue, replaced when the
    // buffer chain is returned:
    req_resp_buffers: OptionalCell<(&'b mut [u8; MAX_REQ_SIZE], &'b mut [u8; MAX_RESP_SIZE])>,

    // Video output parameters (in pixels):
    width: u32,
    height: u32,

    // Set up by `Screen::set_write_frame`, and then later written to with a
    // call to `Screen::write`. It contains the `Rect` being written to, and the
    // current write offset in (x, y) coordinates:
    current_draw_area: Cell<(
        // Draw area:
        Rect,
        // Current draw offset, relative to the draw area itself:
        (u32, u32),
        // Optimization -- count the number of pixels remaining undrawn:
        usize,
    )>,

    // The client provides us a subslice, but we need to place a `&'static mut`
    // buffer into the VirtQueue. We store the client's bounds here. We can't
    // use a `Range<usize>` as it isn't `Copy`, and so have to store
    // `range.start` and `range.end` instead.
    write_buffer_subslice_range: Cell<(usize, usize)>,

    // We can only draw rectangles, but the client can ask us to do arbitrarily
    // sized partial writes. This means that sometimes we might need to perform
    // multiple writes in response to a single client request. This stores the
    // offset into the client's buffer we've processed so far:
    write_buffer_offset: Cell<usize>,

    // Slot for the client's write buffer, while it's attached to the GPU:
    write_buffer: TakeCell<'static, [u8]>,

    // Current rect being transferred to the host, and its size in pixels:
    current_transfer_area_pixels: Cell<(Rect, usize)>,
}
132
133impl<'a, 'b> VirtIOGPU<'a, 'b> {
134    pub fn new(
135        control_queue: &'a SplitVirtqueue<'a, 'b, 2>,
136        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
137        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
138        width: usize,
139        height: usize,
140    ) -> Result<VirtIOGPU<'a, 'b>, ErrorCode> {
141        let width: u32 = width.try_into().map_err(|_| ErrorCode::SIZE)?;
142        let height: u32 = height.try_into().map_err(|_| ErrorCode::SIZE)?;
143
144        Ok(VirtIOGPU {
145            client: OptionalCell::empty(),
146            state: Cell::new(VirtIOGPUState::Uninitialized),
147            deferred_call: DeferredCall::new(),
148            pending_deferred_call_mask: deferred_call::PendingDeferredCallMask::new(),
149
150            control_queue,
151            req_resp_buffers: OptionalCell::new((req_buffer, resp_buffer)),
152
153            width,
154            height,
155
156            current_draw_area: Cell::new((Rect::empty(), (0, 0), 0)),
157            write_buffer_subslice_range: Cell::new((0, 0)),
158            write_buffer_offset: Cell::new(0),
159            write_buffer: TakeCell::empty(),
160            current_transfer_area_pixels: Cell::new((Rect::empty(), 0)),
161        })
162    }
163
164    pub fn initialize(&self) -> Result<(), ErrorCode> {
165        // We can't double-initialize this device:
166        let VirtIOGPUState::Uninitialized = self.state.get() else {
167            return Err(ErrorCode::ALREADY);
168        };
169
170        // Enable callbacks for used descriptors:
171        self.control_queue.enable_used_callbacks();
172
173        // Take the request and response buffers. They must be available during
174        // initialization:
175        let (req_buffer, resp_buffer) = self.req_resp_buffers.take().unwrap();
176
177        // Step 1: Create host resource
178        let cmd_resource_create_2d_req = ResourceCreate2DReq {
179            ctrl_header: CtrlHeader {
180                ctrl_type: ResourceCreate2DReq::CTRL_TYPE,
181                flags: 0,
182                fence_id: 0,
183                ctx_id: 0,
184                padding: 0,
185            },
186            resource_id: 1,
187            format: VideoFormat::A8R8G8B8Unorm,
188            width: self.width,
189            height: self.height,
190        };
191        cmd_resource_create_2d_req
192            .write_to_byte_iter(&mut req_buffer.iter_mut())
193            .unwrap();
194
195        let mut buffer_chain = [
196            Some(VirtqueueBuffer {
197                buf: req_buffer,
198                len: ResourceCreate2DReq::ENCODED_SIZE,
199                device_writeable: false,
200            }),
201            Some(VirtqueueBuffer {
202                buf: resp_buffer,
203                len: ResourceCreate2DResp::ENCODED_SIZE,
204                device_writeable: true,
205            }),
206        ];
207        self.control_queue
208            .provide_buffer_chain(&mut buffer_chain)
209            .unwrap();
210
211        self.state.set(VirtIOGPUState::InitializingResourceCreate2D);
212
213        Ok(())
214    }
215
216    fn initialize_resource_create_2d_resp(
217        &self,
218        _resp: ResourceCreate2DResp,
219        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
220        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
221    ) {
222        // Step 2: Attach backing memory (our framebuffer)
223
224        // At first, we attach a zero-sized dummy buffer:
225        const ENTRIES: usize = 1;
226        let cmd_resource_attach_backing_req: ResourceAttachBackingReq<{ ENTRIES }> =
227            ResourceAttachBackingReq {
228                ctrl_header: CtrlHeader {
229                    ctrl_type: ResourceAttachBackingReq::<{ ENTRIES }>::CTRL_TYPE,
230                    flags: 0,
231                    fence_id: 0,
232                    ctx_id: 0,
233                    padding: 0,
234                },
235                resource_id: 1,
236                nr_entries: ENTRIES as u32,
237                entries: [MemEntry {
238                    // TODO: use dummy buffer!
239                    addr: 1,
240                    length: 1,
241                    padding: 0,
242                }],
243            };
244        cmd_resource_attach_backing_req
245            .write_to_byte_iter(&mut req_buffer.iter_mut())
246            .unwrap();
247
248        let mut buffer_chain = [
249            Some(VirtqueueBuffer {
250                buf: req_buffer,
251                len: ResourceAttachBackingReq::<{ ENTRIES }>::ENCODED_SIZE,
252                device_writeable: false,
253            }),
254            Some(VirtqueueBuffer {
255                buf: resp_buffer,
256                len: ResourceAttachBackingResp::ENCODED_SIZE,
257                device_writeable: true,
258            }),
259        ];
260        self.control_queue
261            .provide_buffer_chain(&mut buffer_chain)
262            .unwrap();
263
264        self.state
265            .set(VirtIOGPUState::InitializingResourceAttachBacking);
266    }
267
268    fn initialize_resource_attach_backing_resp(
269        &self,
270        _resp: ResourceAttachBackingResp,
271        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
272        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
273    ) {
274        // Step 3: Set scanout
275        let cmd_set_scanout_req = SetScanoutReq {
276            ctrl_header: CtrlHeader {
277                ctrl_type: SetScanoutReq::CTRL_TYPE,
278                flags: 0,
279                fence_id: 0,
280                ctx_id: 0,
281                padding: 0,
282            },
283            r: Rect {
284                x: 0,
285                y: 0,
286                width: self.width,
287                height: self.height,
288            },
289            scanout_id: 0,
290            resource_id: 1,
291        };
292        cmd_set_scanout_req
293            .write_to_byte_iter(&mut req_buffer.iter_mut())
294            .unwrap();
295
296        let mut buffer_chain = [
297            Some(VirtqueueBuffer {
298                buf: req_buffer,
299                len: SetScanoutReq::ENCODED_SIZE,
300                device_writeable: false,
301            }),
302            Some(VirtqueueBuffer {
303                buf: resp_buffer,
304                len: SetScanoutResp::ENCODED_SIZE,
305                device_writeable: true,
306            }),
307        ];
308        self.control_queue
309            .provide_buffer_chain(&mut buffer_chain)
310            .unwrap();
311
312        self.state.set(VirtIOGPUState::InitializingSetScanout);
313    }
314
315    fn initialize_set_scanout_resp(
316        &self,
317        _resp: SetScanoutResp,
318        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
319        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
320    ) {
321        // Step 4: Detach resource
322        let cmd_resource_detach_backing_req = ResourceDetachBackingReq {
323            ctrl_header: CtrlHeader {
324                ctrl_type: ResourceDetachBackingReq::CTRL_TYPE,
325                flags: 0,
326                fence_id: 0,
327                ctx_id: 0,
328                padding: 0,
329            },
330            resource_id: 1,
331            padding: 0,
332        };
333        cmd_resource_detach_backing_req
334            .write_to_byte_iter(&mut req_buffer.iter_mut())
335            .unwrap();
336
337        let mut buffer_chain = [
338            Some(VirtqueueBuffer {
339                buf: req_buffer,
340                len: ResourceDetachBackingReq::ENCODED_SIZE,
341                device_writeable: false,
342            }),
343            Some(VirtqueueBuffer {
344                buf: resp_buffer,
345                len: ResourceDetachBackingResp::ENCODED_SIZE,
346                device_writeable: true,
347            }),
348        ];
349        self.control_queue
350            .provide_buffer_chain(&mut buffer_chain)
351            .unwrap();
352
353        self.state
354            .set(VirtIOGPUState::InitializingResourceDetachBacking);
355    }
356
357    fn initialize_resource_detach_backing_resp(
358        &self,
359        _resp: ResourceDetachBackingResp,
360        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
361        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
362    ) {
363        // Initialization done! Return the buffers:
364        self.req_resp_buffers.replace((req_buffer, resp_buffer));
365
366        // Set the device state:
367        self.state.set(VirtIOGPUState::Idle);
368
369        // Then issue the appropriate callback:
370        self.client.map(|c| c.screen_is_ready());
371    }
372
    /// Issue the next `TRANSFER_TO_HOST_2D` command for the current
    /// write, or detach the backing buffer when nothing is left to
    /// copy.
    fn continue_draw_transfer_to_host_2d(
        &self,
        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
    ) {
        // Now, the `TRANSFER_TO_HOST_2D` command can only copy rectangles.
        // However, when we performed a partial write (let's say of just one
        // pixel), then the current x offset will not perfectly line up with the
        // left boundary of the overall draw rectangle. Similarly, when the
        // buffer doesn't perfectly fill up the last row of pixels, we can't
        // draw them together with the previous rows of the rectangle. Thus, a
        // single `write` call may result in at most three underlying
        // `TRANSFER_TO_HOST_2D` commands.
        //
        // At this stage, we have the `write_buffer_subslice_range` set to the
        // client's range, `write_buffer_offset` contains the offset into this
        // subslice range that we've already drawn, and `current_draw_area` has
        // the correct offset into the rectangle on the host.
        let (draw_rect, current_draw_offset, remaining_pixels) = self.current_draw_area.get();
        // Reconstruct the client's `Range` from its `(start, end)`
        // representation (a `Range<usize>` isn't `Copy`, see the field docs):
        let (write_buffer_subslice_range_start, write_buffer_subslice_range_end) =
            self.write_buffer_subslice_range.get();
        let write_buffer_subslice_range = Range {
            start: write_buffer_subslice_range_start,
            end: write_buffer_subslice_range_end,
        };
        let write_buffer_offset = self.write_buffer_offset.get();

        // Compute the remaining bytes left in the client-supplied buffer. The
        // offset must never exceed the subslice length, hence the checked
        // subtraction:
        let write_buffer_remaining_bytes = write_buffer_subslice_range
            .len()
            .checked_sub(write_buffer_offset)
            .unwrap();
        // The client buffer must hold whole 4-byte pixels:
        assert!(write_buffer_remaining_bytes % PIXEL_STRIDE == 0);
        let write_buffer_remaining_pixels = write_buffer_remaining_bytes / PIXEL_STRIDE;
        assert!(write_buffer_remaining_pixels <= remaining_pixels);

        // Check whether the current draw offset within the rectangle has an `x`
        // coordinate of zero. That means we can copy one or more full rows, or
        // the last partial row of the draw area:
        let transfer_pixels = if draw_rect.is_empty() {
            // Short-circuit an empty draw_rect, to avoid divide by zero
            // areas when using `rect.width` or `rect.height` as a divisor:
            0
        } else if current_draw_offset.0 == 0 {
            // Okay, we can start drawing the full rectangle. We want to try
            // drawing any full rows, if there are any left, and if not the
            // last partial row:
            assert!(current_draw_offset.1 <= draw_rect.height || remaining_pixels == 0);
            if current_draw_offset.1 >= draw_rect.height {
                // Just one row left to draw, and we start from `x ==
                // 0`. This means we can just copy however much more data
                // the client buffer holds. We've previously checked that
                // the client buffer fully fits into the draw area, but
                // re-check that assertion here:
                assert!(draw_rect.width as usize >= write_buffer_remaining_pixels);
                write_buffer_remaining_pixels
            } else {
                // There is more than one row left to copy, and we start from
                // `x == 0`. Copy only whole rows here (flooring division); a
                // trailing partial row is handled by a subsequent transfer:
                write_buffer_remaining_pixels / (draw_rect.width as usize)
                    * (draw_rect.width as usize)
            }
        } else {
            // Our current draw offset is not zero. This means we must copy
            // the current row, and then potentially any subsequent
            // rows. Determine how much to copy based on the lower of the
            // remaining data in the slice, or the remaining row width:
            let remaining_row_width = draw_rect.width.checked_sub(current_draw_offset.0).unwrap();
            core::cmp::min(remaining_row_width as usize, write_buffer_remaining_pixels)
        };

        // If we've got nothing left to copy, great! We're done drawing, but
        // still need to detach the resource:
        if transfer_pixels == 0 {
            let cmd_resource_detach_backing_req = ResourceDetachBackingReq {
                ctrl_header: CtrlHeader {
                    ctrl_type: ResourceDetachBackingReq::CTRL_TYPE,
                    flags: 0,
                    fence_id: 0,
                    ctx_id: 0,
                    padding: 0,
                },
                resource_id: 1,
                padding: 0,
            };
            cmd_resource_detach_backing_req
                .write_to_byte_iter(&mut req_buffer.iter_mut())
                .unwrap();

            // Queue a device-readable request and a device-writeable
            // response buffer:
            let mut buffer_chain = [
                Some(VirtqueueBuffer {
                    buf: req_buffer,
                    len: ResourceDetachBackingReq::ENCODED_SIZE,
                    device_writeable: false,
                }),
                Some(VirtqueueBuffer {
                    buf: resp_buffer,
                    len: ResourceDetachBackingResp::ENCODED_SIZE,
                    device_writeable: true,
                }),
            ];
            self.control_queue
                .provide_buffer_chain(&mut buffer_chain)
                .unwrap();

            self.state.set(VirtIOGPUState::DrawResourceDetachBacking);

            return;
        }

        // Otherwise, build the transfer rect from `transfer_pixels`,
        // `draw_rect` and the current draw offset. Transfers cover either a
        // run of whole rows (height > 1 only when starting at `x == 0`), or
        // a segment within a single row:
        let transfer_rect = Rect {
            x: draw_rect.x.checked_add(current_draw_offset.0).unwrap(),
            y: draw_rect.y.checked_add(current_draw_offset.1).unwrap(),
            width: core::cmp::min(transfer_pixels, draw_rect.width as usize) as u32,
            height: transfer_pixels.div_ceil(draw_rect.width as usize) as u32,
        };
        // Record what we are about to draw, so that the flush step and the
        // post-flush bookkeeping can refer to it:
        self.current_transfer_area_pixels
            .set((transfer_rect, transfer_pixels));

        // Request the device to copy this rectangle out of the attached
        // backing buffer (`offset` is in bytes, into the client buffer):
        let cmd_transfer_to_host_2d_req = TransferToHost2DReq {
            ctrl_header: CtrlHeader {
                ctrl_type: TransferToHost2DReq::CTRL_TYPE,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                padding: 0,
            },
            r: transfer_rect,
            offset: write_buffer_offset as u64,
            resource_id: 1,
            padding: 0,
        };
        cmd_transfer_to_host_2d_req
            .write_to_byte_iter(&mut req_buffer.iter_mut())
            .unwrap();

        // Queue a device-readable request and a device-writeable response
        // buffer:
        let mut buffer_chain = [
            Some(VirtqueueBuffer {
                buf: req_buffer,
                len: TransferToHost2DReq::ENCODED_SIZE,
                device_writeable: false,
            }),
            Some(VirtqueueBuffer {
                buf: resp_buffer,
                len: TransferToHost2DResp::ENCODED_SIZE,
                device_writeable: true,
            }),
        ];
        self.control_queue
            .provide_buffer_chain(&mut buffer_chain)
            .unwrap();

        self.state.set(VirtIOGPUState::DrawTransferToHost2D);
    }
537
538    fn continue_draw_resource_flush(
539        &self,
540        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
541        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
542    ) {
543        let (current_transfer_area, _) = self.current_transfer_area_pixels.get();
544
545        let cmd_resource_flush_req = ResourceFlushReq {
546            ctrl_header: CtrlHeader {
547                ctrl_type: ResourceFlushReq::CTRL_TYPE,
548                flags: 0,
549                fence_id: 0,
550                ctx_id: 0,
551                padding: 0,
552            },
553            r: current_transfer_area,
554            resource_id: 1,
555            padding: 0,
556        };
557        cmd_resource_flush_req
558            .write_to_byte_iter(&mut req_buffer.iter_mut())
559            .unwrap();
560
561        let mut buffer_chain = [
562            Some(VirtqueueBuffer {
563                buf: req_buffer,
564                len: ResourceFlushReq::ENCODED_SIZE,
565                device_writeable: false,
566            }),
567            Some(VirtqueueBuffer {
568                buf: resp_buffer,
569                len: ResourceFlushResp::ENCODED_SIZE,
570                device_writeable: true,
571            }),
572        ];
573        self.control_queue
574            .provide_buffer_chain(&mut buffer_chain)
575            .unwrap();
576
577        self.state.set(VirtIOGPUState::DrawResourceFlush);
578    }
579
580    fn continue_draw_resource_flushed(
581        &self,
582        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
583        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
584    ) {
585        // We've finished one write command, but there might be more to
586        // come. Increment `current_draw_offset` and `write_buffer_offset`, and
587        // decrement `remaining_pixels` accordingly.
588        let (draw_rect, mut current_draw_offset, mut remaining_pixels) =
589            self.current_draw_area.get();
590        let mut write_buffer_offset = self.write_buffer_offset.get();
591
592        // This is what we've just drawn:
593        let (drawn_area, drawn_pixels) = self.current_transfer_area_pixels.get();
594
595        // We always draw left -> right, top -> bottom, so we can simply set the
596        // current `x` and `y` coordinates to the bottom-right most coordinates
597        // we've just drawn (while wrapping and carrying the one):
598        current_draw_offset.0 = drawn_area
599            .x
600            .checked_add(drawn_area.width)
601            .and_then(|drawn_x1| drawn_x1.checked_sub(draw_rect.x))
602            .unwrap();
603        current_draw_offset.1 = drawn_area
604            .y
605            .checked_add(drawn_area.height)
606            .and_then(|drawn_y1| drawn_y1.checked_sub(draw_rect.y))
607            .unwrap();
608
609        // Wrap to the next line when we've finished writing the column of our
610        // last row drawn:
611        assert!(current_draw_offset.0 <= draw_rect.width);
612        if current_draw_offset.0 == draw_rect.width {
613            current_draw_offset.0 = 0;
614            current_draw_offset.1 = current_draw_offset.1.checked_add(1).unwrap();
615        }
616
617        // Subtract our drawn_pixels from `remaining_pixels`:
618        assert!(remaining_pixels >= drawn_pixels);
619        remaining_pixels -= drawn_pixels;
620
621        // Add our drawn pixels * PIXEL_STRIDE to the buffer offset:
622        write_buffer_offset += drawn_pixels.checked_mul(PIXEL_STRIDE).unwrap();
623
624        // Write all of this back:
625        self.current_draw_area
626            .set((draw_rect, current_draw_offset, remaining_pixels));
627        self.write_buffer_offset.set(write_buffer_offset);
628
629        // And continue drawing:
630        self.continue_draw_transfer_to_host_2d(req_buffer, resp_buffer);
631    }
632
633    fn continue_draw_resource_detached_backing(
634        &self,
635        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
636        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
637    ) {
638        self.req_resp_buffers.replace((req_buffer, resp_buffer));
639        self.state.set(VirtIOGPUState::Idle);
640
641        let (write_buffer_subslice_range_start, write_buffer_subslice_range_end) =
642            self.write_buffer_subslice_range.get();
643        let write_buffer_subslice_range = Range {
644            start: write_buffer_subslice_range_start,
645            end: write_buffer_subslice_range_end,
646        };
647
648        let mut subslice = SubSliceMut::new(self.write_buffer.take().unwrap());
649        subslice.slice(write_buffer_subslice_range);
650
651        self.client.map(|c| c.write_complete(subslice, Ok(())));
652    }
653
654    fn buffer_chain_callback(
655        &self,
656        buffer_chain: &mut [Option<VirtqueueBuffer<'b>>],
657        _bytes_used: usize,
658    ) {
659        // Every response should return exactly two buffers: one
660        // request buffer, and one response buffer.
661        let req_buffer = buffer_chain
662            .get_mut(0)
663            .and_then(|opt_buf| opt_buf.take())
664            .expect("Missing request buffer in VirtIO GPU buffer chain");
665        let resp_buffer = buffer_chain
666            .get_mut(1)
667            .and_then(|opt_buf| opt_buf.take())
668            .expect("Missing request buffer in VirtIO GPU buffer chain");
669
670        // Convert the buffer slices back into arrays:
671        let req_array: &mut [u8; MAX_REQ_SIZE] = req_buffer
672            .buf
673            .try_into()
674            .expect("Returned VirtIO GPU request buffer has unexpected size!");
675
676        let resp_length = resp_buffer.len;
677        let resp_array: &mut [u8; MAX_RESP_SIZE] = resp_buffer
678            .buf
679            .try_into()
680            .expect("Returned VirtIO GPU response buffer has unexpected size!");
681
682        // Check that the response has a length we can parse into a CtrlHeader:
683        if resp_length < CtrlHeader::ENCODED_SIZE {
684            panic!(
685                "VirtIO GPU returned response smaller than the CtrlHeader, \
686                 which we cannot parse! Returned bytes: {}",
687                resp_length
688            )
689        }
690
691        // We progressively parse the response, starting with the CtrlHeader
692        // shared across all messages, checking its type, and then parsing the
693        // rest. We do so by reusing a common iterator across these operations:
694        let mut resp_iter = resp_array.iter().copied();
695        let ctrl_header = CtrlHeader::from_byte_iter(&mut resp_iter)
696            .expect("Failed to parse VirtIO response CtrlHeader");
697
698        // We now match the current device state with the ctrl_type
699        // that was returned to continue parsing:
700        match (self.state.get(), ctrl_header.ctrl_type) {
701            (
702                VirtIOGPUState::InitializingResourceCreate2D,
703                ResourceCreate2DResp::EXPECTED_CTRL_TYPE,
704            ) => {
705                // Parse the remainder of the response:
706                let resp = ResourceCreate2DResp::from_byte_iter_post_ctrl_header(
707                    ctrl_header,
708                    &mut resp_iter,
709                )
710                .expect("Failed to parse VirtIO GPU ResourceCreate2DResp");
711
712                // Continue the initialization routine:
713                self.initialize_resource_create_2d_resp(resp, req_array, resp_array);
714            }
715
716            (
717                VirtIOGPUState::InitializingResourceAttachBacking,
718                ResourceAttachBackingResp::EXPECTED_CTRL_TYPE,
719            ) => {
720                // Parse the remainder of the response:
721                let resp = ResourceAttachBackingResp::from_byte_iter_post_ctrl_header(
722                    ctrl_header,
723                    &mut resp_iter,
724                )
725                .expect("Failed to parse VirtIO GPU ResourceAttachBackingResp");
726
727                // Continue the initialization routine:
728                self.initialize_resource_attach_backing_resp(resp, req_array, resp_array);
729            }
730
731            (VirtIOGPUState::InitializingSetScanout, SetScanoutResp::EXPECTED_CTRL_TYPE) => {
732                // Parse the remainder of the response:
733                let resp =
734                    SetScanoutResp::from_byte_iter_post_ctrl_header(ctrl_header, &mut resp_iter)
735                        .expect("Failed to parse VirtIO GPU SetScanoutResp");
736
737                // Continue the initialization routine:
738                self.initialize_set_scanout_resp(resp, req_array, resp_array);
739            }
740
741            (
742                VirtIOGPUState::InitializingResourceDetachBacking,
743                ResourceDetachBackingResp::EXPECTED_CTRL_TYPE,
744            ) => {
745                // Parse the remainder of the response:
746                let resp = ResourceDetachBackingResp::from_byte_iter_post_ctrl_header(
747                    ctrl_header,
748                    &mut resp_iter,
749                )
750                .expect("Failed to parse VirtIO GPU ResourceDetachBackingResp");
751
752                // Continue the initialization routine:
753                self.initialize_resource_detach_backing_resp(resp, req_array, resp_array);
754            }
755
756            (
757                VirtIOGPUState::DrawResourceAttachBacking,
758                ResourceAttachBackingResp::EXPECTED_CTRL_TYPE,
759            ) => {
760                // Parse the remainder of the response:
761                let _resp = ResourceAttachBackingResp::from_byte_iter_post_ctrl_header(
762                    ctrl_header,
763                    &mut resp_iter,
764                )
765                .expect("Failed to parse VirtIO GPU ResourceAttachBackingResp");
766
767                // Continue the initialization routine:
768                self.continue_draw_transfer_to_host_2d(req_array, resp_array);
769            }
770
771            (VirtIOGPUState::DrawTransferToHost2D, TransferToHost2DResp::EXPECTED_CTRL_TYPE) => {
772                // Parse the remainder of the response:
773                let _resp = TransferToHost2DResp::from_byte_iter_post_ctrl_header(
774                    ctrl_header,
775                    &mut resp_iter,
776                )
777                .expect("Failed to parse VirtIO GPU TransferToHost2DResp");
778
                // Continue the draw routine:
780                self.continue_draw_resource_flush(req_array, resp_array);
781            }
782
783            (VirtIOGPUState::DrawResourceFlush, ResourceFlushResp::EXPECTED_CTRL_TYPE) => {
784                // Parse the remainder of the response:
785                let _resp =
786                    ResourceFlushResp::from_byte_iter_post_ctrl_header(ctrl_header, &mut resp_iter)
787                        .expect("Failed to parse VirtIO GPU ResourceFlushResp");
788
                // Continue the draw routine:
790                self.continue_draw_resource_flushed(req_array, resp_array);
791            }
792
793            (
794                VirtIOGPUState::DrawResourceDetachBacking,
795                ResourceDetachBackingResp::EXPECTED_CTRL_TYPE,
796            ) => {
797                // Parse the remainder of the response:
798                let _resp = ResourceDetachBackingResp::from_byte_iter_post_ctrl_header(
799                    ctrl_header,
800                    &mut resp_iter,
801                )
802                .expect("Failed to parse VirtIO GPU ResourceDetachBackingResp");
803
                // Continue the draw routine:
805                self.continue_draw_resource_detached_backing(req_array, resp_array);
806            }
807
808            (VirtIOGPUState::Uninitialized, _)
809            | (VirtIOGPUState::InitializingResourceCreate2D, _)
810            | (VirtIOGPUState::InitializingResourceAttachBacking, _)
811            | (VirtIOGPUState::InitializingSetScanout, _)
812            | (VirtIOGPUState::InitializingResourceDetachBacking, _)
813            | (VirtIOGPUState::Idle, _)
814            | (VirtIOGPUState::SettingWriteFrame, _)
815            | (VirtIOGPUState::DrawResourceAttachBacking, _)
816            | (VirtIOGPUState::DrawTransferToHost2D, _)
817            | (VirtIOGPUState::DrawResourceFlush, _)
818            | (VirtIOGPUState::DrawResourceDetachBacking, _) => {
819                panic!(
820                    "Received unexpected VirtIO GPU device response. Device \
821                     state: {:?}, ctrl hader: {:?}",
822                    self.state.get(),
823                    ctrl_header
824                );
825            }
826        }
827    }
828}
829
// Implementation of the Tock `Screen` HIL on top of the VirtIO GPU
// command interface. Draw operations are asynchronous: they transition
// the driver's state machine and complete through virtqueue response
// callbacks or deferred calls.
impl<'a> Screen<'a> for VirtIOGPU<'a, '_> {
    /// Register the [`ScreenClient`] that receives completion callbacks.
    ///
    /// Replaces any previously registered client.
    fn set_client(&self, client: &'a dyn ScreenClient) {
        self.client.replace(client);
    }

    /// Report the screen resolution as `(width, height)` in pixels.
    ///
    /// The dimensions are stored as `u32` fields on the driver and widened
    /// to `usize` here.
    fn get_resolution(&self) -> (usize, usize) {
        (self.width as usize, self.height as usize)
    }

    /// Report the pixel format exposed to clients.
    ///
    /// Always `ARGB_8888`, i.e. 4 bytes per pixel, matching
    /// [`PIXEL_STRIDE`].
    fn get_pixel_format(&self) -> ScreenPixelFormat {
        ScreenPixelFormat::ARGB_8888
    }

    /// Report the screen rotation. This driver does not support rotating
    /// the display and always reports `Normal`.
    fn get_rotation(&self) -> ScreenRotation {
        ScreenRotation::Normal
    }

    /// Set the rectangular region that subsequent [`Screen::write`] calls
    /// will fill.
    ///
    /// Validates the region against the screen dimensions and stores it in
    /// `current_draw_area`, together with a reset `(0, 0)` draw offset and
    /// the precomputed pixel count of the region. No device command is
    /// issued; completion is signalled asynchronously via a deferred call
    /// which invokes the client's `command_complete`.
    ///
    /// Errors:
    /// - `BUSY`: the driver state machine is not idle.
    /// - `INVAL`: a coordinate does not fit into `u32`, or the region
    ///   overflows / does not fit onto the screen.
    fn set_write_frame(
        &self,
        x: usize,
        y: usize,
        width: usize,
        height: usize,
    ) -> Result<(), ErrorCode> {
        // Make sure we're idle:
        let VirtIOGPUState::Idle = self.state.get() else {
            return Err(ErrorCode::BUSY);
        };

        // We first convert the coordinates to u32s (the VirtIO GPU wire
        // format uses u32 coordinates):
        let x: u32 = x.try_into().map_err(|_| ErrorCode::INVAL)?;
        let y: u32 = y.try_into().map_err(|_| ErrorCode::INVAL)?;
        let width: u32 = width.try_into().map_err(|_| ErrorCode::INVAL)?;
        let height: u32 = height.try_into().map_err(|_| ErrorCode::INVAL)?;

        // Ensure that the draw area actually fits our screen. The checked
        // additions guard against u32 overflow of the far corner:
        let x1 = x.checked_add(width).ok_or(ErrorCode::INVAL)?;
        let y1 = y.checked_add(height).ok_or(ErrorCode::INVAL)?;
        if x1 > self.width || y1 > self.height {
            return Err(ErrorCode::INVAL);
        }

        // Store the new drawing area as the bounding box and offset coordinates
        // for `write`:
        self.current_draw_area.set((
            // Draw area:
            Rect {
                x,
                y,
                width,
                height,
            },
            // Current draw offset, relative to the draw area itself:
            (0, 0),
            // Precompute the number of pixels in this draw area:
            (width as usize)
                .checked_mul(height as usize)
                .ok_or(ErrorCode::INVAL)?,
        ));

        // Set the device state to busy and issue the callback in a deferred
        // call (no device interaction is required for this operation):
        self.state.set(VirtIOGPUState::SettingWriteFrame);
        self.pending_deferred_call_mask
            .set(deferred_call::PendingDeferredCall::SetWriteFrame);
        self.deferred_call.set();

        Ok(())
    }

    /// Write pixel data into the current write frame.
    ///
    /// `buffer` must hold whole pixels (its length a multiple of
    /// [`PIXEL_STRIDE`]) and must fit into the not-yet-written remainder of
    /// the draw area. If `continue_write` is set, writing resumes at the
    /// stored draw offset; otherwise the offset and remaining-pixel count
    /// are reset to cover the full frame.
    ///
    /// This kicks off an asynchronous command sequence: the client buffer
    /// is attached as backing storage for the host resource
    /// (`RESOURCE_ATTACH_BACKING` request enqueued here); the subsequent
    /// steps (transfer, flush, detach) are driven from the virtqueue
    /// response callbacks.
    ///
    /// Errors:
    /// - `BUSY`: the driver state machine is not idle.
    /// - `INVAL`: the buffer length is not a multiple of the pixel stride.
    /// - `SIZE`: the buffer holds more pixels than remain in the frame.
    ///
    /// NOTE(review): on the error paths below the client's `buffer` is
    /// consumed and dropped rather than handed back — confirm this matches
    /// the Screen HIL's buffer-ownership contract.
    fn write(
        &self,
        buffer: SubSliceMut<'static, u8>,
        continue_write: bool,
    ) -> Result<(), ErrorCode> {
        // Make sure we're idle:
        let VirtIOGPUState::Idle = self.state.get() else {
            return Err(ErrorCode::BUSY);
        };

        // If `continue_write` is false, we must reset `x_off` and
        // `y_off`. Otherwise we start at the stored offset.
        let (draw_rect, mut current_draw_offset, mut remaining_pixels) =
            self.current_draw_area.get();
        if !continue_write {
            current_draw_offset = (0, 0);
            // This multiplication must not overflow, as we've already performed
            // it before in `set_write_frame`:
            remaining_pixels = (draw_rect.width as usize)
                .checked_mul(draw_rect.height as usize)
                .unwrap();
        }
        self.current_draw_area
            .set((draw_rect, current_draw_offset, remaining_pixels));

        // Ensure that this buffer is evenly divisible by PIXEL_STRIDE and that
        // it can fit into the remaining part of the draw area:
        if buffer.len() % PIXEL_STRIDE != 0 {
            return Err(ErrorCode::INVAL);
        }
        if buffer.len() / PIXEL_STRIDE > remaining_pixels {
            return Err(ErrorCode::SIZE);
        }

        // Now, the `TRANSFER_TO_HOST_2D` command can only copy rectangles.
        // However, when we performed a partial write (let's say of just one
        // pixel), then the current x offset will not perfectly line up with the
        // left boundary of the overall draw rectangle. Similarly, when the
        // buffer doesn't perfectly fill up the last row of pixels, we can't
        // draw them together with the previous rows of the rectangle. Thus, a
        // single `write` call may result in at most three underlying
        // `TRANSFER_TO_HOST_2D` commands.
        //
        // We use a common subroutine to identify the next data to copy. We
        // first store the overall subslice active range, and the offset in this
        // subslice (0 right now!), and then let that subroutine handle the rest:
        let write_buffer_subslice_range = buffer.active_range();
        self.write_buffer_subslice_range.set((
            write_buffer_subslice_range.start,
            write_buffer_subslice_range.end,
        ));
        self.write_buffer_offset.set(0);

        // The request/response buffers must be present while idle; this
        // unwrap encodes that state-machine invariant:
        let (req_buffer, resp_buffer) = self.req_resp_buffers.take().unwrap();

        // Now, attach the user-supplied buffer to this device:
        let buffer_slice = buffer.take();

        const ENTRIES: usize = 1;
        let cmd_resource_attach_backing_req: ResourceAttachBackingReq<{ ENTRIES }> =
            ResourceAttachBackingReq {
                ctrl_header: CtrlHeader {
                    ctrl_type: ResourceAttachBackingReq::<{ ENTRIES }>::CTRL_TYPE,
                    flags: 0,
                    fence_id: 0,
                    ctx_id: 0,
                    padding: 0,
                },
                resource_id: 1,
                nr_entries: ENTRIES as u32,
                entries: [MemEntry {
                    // Point the single memory entry at the active portion of
                    // the client's buffer:
                    addr: buffer_slice.as_ptr() as u64 + write_buffer_subslice_range.start as u64,
                    length: write_buffer_subslice_range.len() as u32,
                    padding: 0,
                }],
            };
        // Serialize the request into the shared request buffer:
        cmd_resource_attach_backing_req
            .write_to_byte_iter(&mut req_buffer.iter_mut())
            .unwrap();

        // Stash the client buffer until the draw sequence completes; it must
        // not already hold a buffer (that would indicate a lost buffer):
        assert!(self.write_buffer.replace(buffer_slice).is_none());

        // Enqueue the request (device-readable) and response
        // (device-writeable) descriptors on the control queue:
        let mut buffer_chain = [
            Some(VirtqueueBuffer {
                buf: req_buffer,
                len: ResourceAttachBackingReq::<{ ENTRIES }>::ENCODED_SIZE,
                device_writeable: false,
            }),
            Some(VirtqueueBuffer {
                buf: resp_buffer,
                len: ResourceAttachBackingResp::ENCODED_SIZE,
                device_writeable: true,
            }),
        ];
        self.control_queue
            .provide_buffer_chain(&mut buffer_chain)
            .unwrap();

        // The response callback continues the draw sequence from this state:
        self.state.set(VirtIOGPUState::DrawResourceAttachBacking);

        Ok(())
    }

    /// Brightness control is not supported; accept and ignore the request.
    fn set_brightness(&self, _brightness: u16) -> Result<(), ErrorCode> {
        // nop, not supported
        Ok(())
    }

    /// Power control: the screen cannot be powered off, so only
    /// `enabled == true` is accepted.
    ///
    /// NOTE(review): returns synchronously without issuing a client
    /// callback — confirm against the Screen HIL's expectations.
    fn set_power(&self, enabled: bool) -> Result<(), ErrorCode> {
        if !enabled {
            Err(ErrorCode::INVAL)
        } else {
            Ok(())
        }
    }

    /// Color inversion is not supported by this driver.
    fn set_invert(&self, _enabled: bool) -> Result<(), ErrorCode> {
        Err(ErrorCode::NOSUPPORT)
    }
}
1020
1021impl<'b> SplitVirtqueueClient<'b> for VirtIOGPU<'_, 'b> {
1022    fn buffer_chain_ready(
1023        &self,
1024        _queue_number: u32,
1025        buffer_chain: &mut [Option<VirtqueueBuffer<'b>>],
1026        bytes_used: usize,
1027    ) {
1028        self.buffer_chain_callback(buffer_chain, bytes_used)
1029    }
1030}
1031
1032impl DeferredCallClient for VirtIOGPU<'_, '_> {
1033    fn register(&'static self) {
1034        self.deferred_call.register(self);
1035    }
1036
1037    fn handle_deferred_call(&self) {
1038        let calls = self.pending_deferred_call_mask.get_copy_and_clear();
1039        calls.for_each_call(|call| match call {
1040            deferred_call::PendingDeferredCall::SetWriteFrame => {
1041                let VirtIOGPUState::SettingWriteFrame = self.state.get() else {
1042                    panic!(
1043                        "Unexpected VirtIOGPUState {:?} for SetWriteFrame \
1044                         deferred call",
1045                        self.state.get()
1046                    );
1047                };
1048
1049                // Set the device staste back to idle:
1050                self.state.set(VirtIOGPUState::Idle);
1051
1052                // Issue callback:
1053                self.client.map(|c| c.command_complete(Ok(())));
1054            }
1055        })
1056    }
1057}
1058
1059impl VirtIODeviceDriver for VirtIOGPU<'_, '_> {
1060    fn negotiate_features(&self, _offered_features: u64) -> Option<u64> {
1061        // We don't support any special features and do not care about
1062        // what the device offers.
1063        Some(0)
1064    }
1065
1066    fn device_type(&self) -> VirtIODeviceType {
1067        VirtIODeviceType::GPUDevice
1068    }
1069}