1use core::cell::Cell;
6use core::ops::Range;
7
8use kernel::deferred_call::{DeferredCall, DeferredCallClient};
9use kernel::hil::screen::{Screen, ScreenClient, ScreenPixelFormat, ScreenRotation};
10use kernel::utilities::cells::{OptionalCell, TakeCell};
11use kernel::utilities::leasable_buffer::SubSliceMut;
12use kernel::ErrorCode;
13
14use super::super::devices::{VirtIODeviceDriver, VirtIODeviceType};
15use super::super::queues::split_queue::{SplitVirtqueue, SplitVirtqueueClient, VirtqueueBuffer};
16
17mod deferred_call;
18mod helpers;
19mod messages;
20
21use messages::{
22 ctrl_header::CtrlHeader,
23 resource_attach_backing::{MemEntry, ResourceAttachBackingReq, ResourceAttachBackingResp},
24 resource_create_2d::{ResourceCreate2DReq, ResourceCreate2DResp, VideoFormat},
25 resource_detach_backing::{ResourceDetachBackingReq, ResourceDetachBackingResp},
26 resource_flush::{ResourceFlushReq, ResourceFlushResp},
27 set_scanout::{SetScanoutReq, SetScanoutResp},
28 transfer_to_host_2d::{TransferToHost2DReq, TransferToHost2DResp},
29 Rect, VirtIOGPUReq, VirtIOGPUResp,
30};
31
/// Number of bytes per pixel.
///
/// The driver exclusively uses the 32-bit `A8R8G8B8Unorm` video format
/// (configured in [`VirtIOGPU::initialize`]), hence a stride of 4 bytes.
pub const PIXEL_STRIDE: usize = 4;

/// Maximum number of memory entries carried in a single
/// `ResourceAttachBackingReq`.
///
/// This driver only ever attaches one contiguous memory region at a time,
/// so a single entry suffices. This constant bounds the encoded size of the
/// attach-backing request and thus contributes to [`MAX_REQ_SIZE`].
pub const MAX_ATTACH_BACKING_REQ_MEMORY_ENTRIES: usize = 1;

/// Size in bytes of the largest request message this driver can send.
///
/// Used to dimension the single, shared request buffer passed to
/// [`VirtIOGPU::new`].
pub const MAX_REQ_SIZE: usize = helpers::max(&[
    ResourceCreate2DReq::ENCODED_SIZE,
    ResourceAttachBackingReq::<{ MAX_ATTACH_BACKING_REQ_MEMORY_ENTRIES }>::ENCODED_SIZE,
    SetScanoutReq::ENCODED_SIZE,
    TransferToHost2DReq::ENCODED_SIZE,
    ResourceFlushReq::ENCODED_SIZE,
    ResourceDetachBackingReq::ENCODED_SIZE,
]);
56
57pub const MAX_RESP_SIZE: usize = helpers::max(&[
60 ResourceCreate2DResp::ENCODED_SIZE,
61 ResourceAttachBackingResp::ENCODED_SIZE,
62 SetScanoutResp::ENCODED_SIZE,
63 ResourceFlushResp::ENCODED_SIZE,
64 ResourceDetachBackingResp::ENCODED_SIZE,
65]);
66
/// States of the VirtIO GPU driver's internal state machine.
///
/// The driver issues at most one control-queue command at a time; most
/// states record which command's response is currently outstanding.
#[derive(Copy, Clone, Debug)]
pub enum VirtIOGPUState {
    /// Constructed, but [`VirtIOGPU::initialize`] has not been called yet.
    Uninitialized,
    /// Awaiting the response to the initial `ResourceCreate2D` command.
    InitializingResourceCreate2D,
    /// Awaiting the response to the initial `ResourceAttachBacking` command.
    InitializingResourceAttachBacking,
    /// Awaiting the response to the initial `SetScanout` command.
    InitializingSetScanout,
    /// Awaiting the response to the initial `ResourceDetachBacking` command.
    InitializingResourceDetachBacking,
    /// No command outstanding; ready to accept client requests.
    Idle,
    /// A `set_write_frame` operation is pending completion via deferred call.
    SettingWriteFrame,
    /// Draw: awaiting the `ResourceAttachBacking` response for the client's
    /// write buffer.
    DrawResourceAttachBacking,
    /// Draw: awaiting a `TransferToHost2D` response.
    DrawTransferToHost2D,
    /// Draw: awaiting a `ResourceFlush` response.
    DrawResourceFlush,
    /// Draw: awaiting the final `ResourceDetachBacking` response.
    DrawResourceDetachBacking,
}
82
/// VirtIO GPU device driver, exposing a 2D framebuffer through the
/// [`Screen`] HIL.
pub struct VirtIOGPU<'a, 'b> {
    /// Screen client receiving readiness and completion callbacks.
    client: OptionalCell<&'a dyn ScreenClient>,
    /// Current state of the one-command-at-a-time state machine.
    state: Cell<VirtIOGPUState>,
    /// Deferred call used to deliver `set_write_frame` completion.
    deferred_call: DeferredCall,
    /// Set of deferred calls pending delivery.
    pending_deferred_call_mask: deferred_call::PendingDeferredCallMask,

    /// VirtIO control queue (two descriptors: request + response).
    control_queue: &'a SplitVirtqueue<'a, 'b, 2>,
    /// Shared request/response message buffers. Empty while a command is
    /// in flight on the control queue.
    req_resp_buffers: OptionalCell<(&'b mut [u8; MAX_REQ_SIZE], &'b mut [u8; MAX_RESP_SIZE])>,

    /// Screen width in pixels (fixed at construction).
    width: u32,
    /// Screen height in pixels (fixed at construction).
    height: u32,

    /// Progress of the current draw operation:
    current_draw_area: Cell<(
        // rectangle selected via `set_write_frame`,
        Rect,
        // current (column, row) draw offset within that rectangle,
        (u32, u32),
        // number of pixels of the rectangle not yet drawn.
        usize,
    )>,

    /// Active range `(start, end)` of the client's write buffer within its
    /// underlying slice, as reported by `SubSliceMut::active_range`.
    write_buffer_subslice_range: Cell<(usize, usize)>,

    /// Byte offset into the active range of pixel data already transferred
    /// to the host during the current draw operation.
    write_buffer_offset: Cell<usize>,

    /// The client's write buffer, held for the duration of a draw.
    write_buffer: TakeCell<'static, [u8]>,

    /// Rectangle and pixel count of the in-flight `TransferToHost2D`.
    current_transfer_area_pixels: Cell<(Rect, usize)>,
}
132
133impl<'a, 'b> VirtIOGPU<'a, 'b> {
134 pub fn new(
135 control_queue: &'a SplitVirtqueue<'a, 'b, 2>,
136 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
137 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
138 width: usize,
139 height: usize,
140 ) -> Result<VirtIOGPU<'a, 'b>, ErrorCode> {
141 let width: u32 = width.try_into().map_err(|_| ErrorCode::SIZE)?;
142 let height: u32 = height.try_into().map_err(|_| ErrorCode::SIZE)?;
143
144 Ok(VirtIOGPU {
145 client: OptionalCell::empty(),
146 state: Cell::new(VirtIOGPUState::Uninitialized),
147 deferred_call: DeferredCall::new(),
148 pending_deferred_call_mask: deferred_call::PendingDeferredCallMask::new(),
149
150 control_queue,
151 req_resp_buffers: OptionalCell::new((req_buffer, resp_buffer)),
152
153 width,
154 height,
155
156 current_draw_area: Cell::new((Rect::empty(), (0, 0), 0)),
157 write_buffer_subslice_range: Cell::new((0, 0)),
158 write_buffer_offset: Cell::new(0),
159 write_buffer: TakeCell::empty(),
160 current_transfer_area_pixels: Cell::new((Rect::empty(), 0)),
161 })
162 }
163
164 pub fn initialize(&self) -> Result<(), ErrorCode> {
165 let VirtIOGPUState::Uninitialized = self.state.get() else {
167 return Err(ErrorCode::ALREADY);
168 };
169
170 self.control_queue.enable_used_callbacks();
172
173 let (req_buffer, resp_buffer) = self.req_resp_buffers.take().unwrap();
176
177 let cmd_resource_create_2d_req = ResourceCreate2DReq {
179 ctrl_header: CtrlHeader {
180 ctrl_type: ResourceCreate2DReq::CTRL_TYPE,
181 flags: 0,
182 fence_id: 0,
183 ctx_id: 0,
184 padding: 0,
185 },
186 resource_id: 1,
187 format: VideoFormat::A8R8G8B8Unorm,
188 width: self.width,
189 height: self.height,
190 };
191 cmd_resource_create_2d_req
192 .write_to_byte_iter(&mut req_buffer.iter_mut())
193 .unwrap();
194
195 let mut buffer_chain = [
196 Some(VirtqueueBuffer {
197 buf: req_buffer,
198 len: ResourceCreate2DReq::ENCODED_SIZE,
199 device_writeable: false,
200 }),
201 Some(VirtqueueBuffer {
202 buf: resp_buffer,
203 len: ResourceCreate2DResp::ENCODED_SIZE,
204 device_writeable: true,
205 }),
206 ];
207 self.control_queue
208 .provide_buffer_chain(&mut buffer_chain)
209 .unwrap();
210
211 self.state.set(VirtIOGPUState::InitializingResourceCreate2D);
212
213 Ok(())
214 }
215
216 fn initialize_resource_create_2d_resp(
217 &self,
218 _resp: ResourceCreate2DResp,
219 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
220 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
221 ) {
222 const ENTRIES: usize = 1;
226 let cmd_resource_attach_backing_req: ResourceAttachBackingReq<{ ENTRIES }> =
227 ResourceAttachBackingReq {
228 ctrl_header: CtrlHeader {
229 ctrl_type: ResourceAttachBackingReq::<{ ENTRIES }>::CTRL_TYPE,
230 flags: 0,
231 fence_id: 0,
232 ctx_id: 0,
233 padding: 0,
234 },
235 resource_id: 1,
236 nr_entries: ENTRIES as u32,
237 entries: [MemEntry {
238 addr: 1,
240 length: 1,
241 padding: 0,
242 }],
243 };
244 cmd_resource_attach_backing_req
245 .write_to_byte_iter(&mut req_buffer.iter_mut())
246 .unwrap();
247
248 let mut buffer_chain = [
249 Some(VirtqueueBuffer {
250 buf: req_buffer,
251 len: ResourceAttachBackingReq::<{ ENTRIES }>::ENCODED_SIZE,
252 device_writeable: false,
253 }),
254 Some(VirtqueueBuffer {
255 buf: resp_buffer,
256 len: ResourceAttachBackingResp::ENCODED_SIZE,
257 device_writeable: true,
258 }),
259 ];
260 self.control_queue
261 .provide_buffer_chain(&mut buffer_chain)
262 .unwrap();
263
264 self.state
265 .set(VirtIOGPUState::InitializingResourceAttachBacking);
266 }
267
268 fn initialize_resource_attach_backing_resp(
269 &self,
270 _resp: ResourceAttachBackingResp,
271 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
272 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
273 ) {
274 let cmd_set_scanout_req = SetScanoutReq {
276 ctrl_header: CtrlHeader {
277 ctrl_type: SetScanoutReq::CTRL_TYPE,
278 flags: 0,
279 fence_id: 0,
280 ctx_id: 0,
281 padding: 0,
282 },
283 r: Rect {
284 x: 0,
285 y: 0,
286 width: self.width,
287 height: self.height,
288 },
289 scanout_id: 0,
290 resource_id: 1,
291 };
292 cmd_set_scanout_req
293 .write_to_byte_iter(&mut req_buffer.iter_mut())
294 .unwrap();
295
296 let mut buffer_chain = [
297 Some(VirtqueueBuffer {
298 buf: req_buffer,
299 len: SetScanoutReq::ENCODED_SIZE,
300 device_writeable: false,
301 }),
302 Some(VirtqueueBuffer {
303 buf: resp_buffer,
304 len: SetScanoutResp::ENCODED_SIZE,
305 device_writeable: true,
306 }),
307 ];
308 self.control_queue
309 .provide_buffer_chain(&mut buffer_chain)
310 .unwrap();
311
312 self.state.set(VirtIOGPUState::InitializingSetScanout);
313 }
314
315 fn initialize_set_scanout_resp(
316 &self,
317 _resp: SetScanoutResp,
318 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
319 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
320 ) {
321 let cmd_resource_detach_backing_req = ResourceDetachBackingReq {
323 ctrl_header: CtrlHeader {
324 ctrl_type: ResourceDetachBackingReq::CTRL_TYPE,
325 flags: 0,
326 fence_id: 0,
327 ctx_id: 0,
328 padding: 0,
329 },
330 resource_id: 1,
331 padding: 0,
332 };
333 cmd_resource_detach_backing_req
334 .write_to_byte_iter(&mut req_buffer.iter_mut())
335 .unwrap();
336
337 let mut buffer_chain = [
338 Some(VirtqueueBuffer {
339 buf: req_buffer,
340 len: ResourceDetachBackingReq::ENCODED_SIZE,
341 device_writeable: false,
342 }),
343 Some(VirtqueueBuffer {
344 buf: resp_buffer,
345 len: ResourceDetachBackingResp::ENCODED_SIZE,
346 device_writeable: true,
347 }),
348 ];
349 self.control_queue
350 .provide_buffer_chain(&mut buffer_chain)
351 .unwrap();
352
353 self.state
354 .set(VirtIOGPUState::InitializingResourceDetachBacking);
355 }
356
357 fn initialize_resource_detach_backing_resp(
358 &self,
359 _resp: ResourceDetachBackingResp,
360 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
361 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
362 ) {
363 self.req_resp_buffers.replace((req_buffer, resp_buffer));
365
366 self.state.set(VirtIOGPUState::Idle);
368
369 self.client.map(|c| c.screen_is_ready());
371 }
372
    /// Issue the next `TransferToHost2D` command of the current draw
    /// operation, or — when nothing remains to transfer — detach the
    /// client's buffer from the resource, which concludes the draw.
    ///
    /// Transfers are sliced such that each one covers either a run of
    /// whole rows (when starting at a row boundary) or the remainder of a
    /// partially drawn row, since a single transfer rectangle cannot
    /// express a partial row alongside full rows.
    fn continue_draw_transfer_to_host_2d(
        &self,
        req_buffer: &'b mut [u8; MAX_REQ_SIZE],
        resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
    ) {
        let (draw_rect, current_draw_offset, remaining_pixels) = self.current_draw_area.get();
        let (write_buffer_subslice_range_start, write_buffer_subslice_range_end) =
            self.write_buffer_subslice_range.get();
        let write_buffer_subslice_range = Range {
            start: write_buffer_subslice_range_start,
            end: write_buffer_subslice_range_end,
        };
        let write_buffer_offset = self.write_buffer_offset.get();

        // Bytes of the client's write buffer not yet transferred. `write`
        // guarantees the buffer holds whole pixels and no more pixels than
        // fit the remaining draw area:
        let write_buffer_remaining_bytes = write_buffer_subslice_range
            .len()
            .checked_sub(write_buffer_offset)
            .unwrap();
        assert!(write_buffer_remaining_bytes % PIXEL_STRIDE == 0);
        let write_buffer_remaining_pixels = write_buffer_remaining_bytes / PIXEL_STRIDE;
        assert!(write_buffer_remaining_pixels <= remaining_pixels);

        // Determine how many pixels the next transfer should cover:
        let transfer_pixels = if draw_rect.is_empty() {
            // Nothing can be drawn into an empty rectangle:
            0
        } else if current_draw_offset.0 == 0 {
            // At the start of a row:
            assert!(current_draw_offset.1 <= draw_rect.height || remaining_pixels == 0);
            if current_draw_offset.1 >= draw_rect.height {
                // Past the last row, i.e. the frame is fully drawn;
                // transfer whatever is left in the buffer (expected to be
                // empty at this point — TODO confirm this branch is only
                // reachable with zero remaining pixels):
                assert!(draw_rect.width as usize >= write_buffer_remaining_pixels);
                write_buffer_remaining_pixels
            } else {
                // Transfer as many whole rows as the buffer can fill; a
                // remainder smaller than one row is left for a subsequent
                // continued write:
                write_buffer_remaining_pixels / (draw_rect.width as usize)
                    * (draw_rect.width as usize)
            }
        } else {
            // Mid-row: complete (up to) the remainder of the current row:
            let remaining_row_width = draw_rect.width.checked_sub(current_draw_offset.0).unwrap();
            core::cmp::min(remaining_row_width as usize, write_buffer_remaining_pixels)
        };

        if transfer_pixels == 0 {
            // Nothing (more) to transfer: detach the backing store again,
            // which is the final step of a draw operation:
            let cmd_resource_detach_backing_req = ResourceDetachBackingReq {
                ctrl_header: CtrlHeader {
                    ctrl_type: ResourceDetachBackingReq::CTRL_TYPE,
                    flags: 0,
                    fence_id: 0,
                    ctx_id: 0,
                    padding: 0,
                },
                resource_id: 1,
                padding: 0,
            };
            cmd_resource_detach_backing_req
                .write_to_byte_iter(&mut req_buffer.iter_mut())
                .unwrap();

            let mut buffer_chain = [
                Some(VirtqueueBuffer {
                    buf: req_buffer,
                    len: ResourceDetachBackingReq::ENCODED_SIZE,
                    device_writeable: false,
                }),
                Some(VirtqueueBuffer {
                    buf: resp_buffer,
                    len: ResourceDetachBackingResp::ENCODED_SIZE,
                    device_writeable: true,
                }),
            ];
            self.control_queue
                .provide_buffer_chain(&mut buffer_chain)
                .unwrap();

            self.state.set(VirtIOGPUState::DrawResourceDetachBacking);

            return;
        }

        // Screen-absolute rectangle covered by this transfer. For
        // whole-row transfers the width is the full row width and the
        // height the number of rows; for an intra-row transfer the width
        // is the pixel count and the height is 1:
        let transfer_rect = Rect {
            x: draw_rect.x.checked_add(current_draw_offset.0).unwrap(),
            y: draw_rect.y.checked_add(current_draw_offset.1).unwrap(),
            width: core::cmp::min(transfer_pixels, draw_rect.width as usize) as u32,
            height: transfer_pixels.div_ceil(draw_rect.width as usize) as u32,
        };
        // Record the in-flight transfer for the subsequent flush and
        // book-keeping steps:
        self.current_transfer_area_pixels
            .set((transfer_rect, transfer_pixels));

        let cmd_transfer_to_host_2d_req = TransferToHost2DReq {
            ctrl_header: CtrlHeader {
                ctrl_type: TransferToHost2DReq::CTRL_TYPE,
                flags: 0,
                fence_id: 0,
                ctx_id: 0,
                padding: 0,
            },
            r: transfer_rect,
            // Byte offset into the attached backing store at which the
            // transferred pixel data starts:
            offset: write_buffer_offset as u64,
            resource_id: 1,
            padding: 0,
        };
        cmd_transfer_to_host_2d_req
            .write_to_byte_iter(&mut req_buffer.iter_mut())
            .unwrap();

        let mut buffer_chain = [
            Some(VirtqueueBuffer {
                buf: req_buffer,
                len: TransferToHost2DReq::ENCODED_SIZE,
                device_writeable: false,
            }),
            Some(VirtqueueBuffer {
                buf: resp_buffer,
                len: TransferToHost2DResp::ENCODED_SIZE,
                device_writeable: true,
            }),
        ];
        self.control_queue
            .provide_buffer_chain(&mut buffer_chain)
            .unwrap();

        self.state.set(VirtIOGPUState::DrawTransferToHost2D);
    }
537
538 fn continue_draw_resource_flush(
539 &self,
540 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
541 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
542 ) {
543 let (current_transfer_area, _) = self.current_transfer_area_pixels.get();
544
545 let cmd_resource_flush_req = ResourceFlushReq {
546 ctrl_header: CtrlHeader {
547 ctrl_type: ResourceFlushReq::CTRL_TYPE,
548 flags: 0,
549 fence_id: 0,
550 ctx_id: 0,
551 padding: 0,
552 },
553 r: current_transfer_area,
554 resource_id: 1,
555 padding: 0,
556 };
557 cmd_resource_flush_req
558 .write_to_byte_iter(&mut req_buffer.iter_mut())
559 .unwrap();
560
561 let mut buffer_chain = [
562 Some(VirtqueueBuffer {
563 buf: req_buffer,
564 len: ResourceFlushReq::ENCODED_SIZE,
565 device_writeable: false,
566 }),
567 Some(VirtqueueBuffer {
568 buf: resp_buffer,
569 len: ResourceFlushResp::ENCODED_SIZE,
570 device_writeable: true,
571 }),
572 ];
573 self.control_queue
574 .provide_buffer_chain(&mut buffer_chain)
575 .unwrap();
576
577 self.state.set(VirtIOGPUState::DrawResourceFlush);
578 }
579
580 fn continue_draw_resource_flushed(
581 &self,
582 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
583 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
584 ) {
585 let (draw_rect, mut current_draw_offset, mut remaining_pixels) =
589 self.current_draw_area.get();
590 let mut write_buffer_offset = self.write_buffer_offset.get();
591
592 let (drawn_area, drawn_pixels) = self.current_transfer_area_pixels.get();
594
595 current_draw_offset.0 = drawn_area
599 .x
600 .checked_add(drawn_area.width)
601 .and_then(|drawn_x1| drawn_x1.checked_sub(draw_rect.x))
602 .unwrap();
603 current_draw_offset.1 = drawn_area
604 .y
605 .checked_add(drawn_area.height)
606 .and_then(|drawn_y1| drawn_y1.checked_sub(draw_rect.y))
607 .unwrap();
608
609 assert!(current_draw_offset.0 <= draw_rect.width);
612 if current_draw_offset.0 == draw_rect.width {
613 current_draw_offset.0 = 0;
614 current_draw_offset.1 = current_draw_offset.1.checked_add(1).unwrap();
615 }
616
617 assert!(remaining_pixels >= drawn_pixels);
619 remaining_pixels -= drawn_pixels;
620
621 write_buffer_offset += drawn_pixels.checked_mul(PIXEL_STRIDE).unwrap();
623
624 self.current_draw_area
626 .set((draw_rect, current_draw_offset, remaining_pixels));
627 self.write_buffer_offset.set(write_buffer_offset);
628
629 self.continue_draw_transfer_to_host_2d(req_buffer, resp_buffer);
631 }
632
633 fn continue_draw_resource_detached_backing(
634 &self,
635 req_buffer: &'b mut [u8; MAX_REQ_SIZE],
636 resp_buffer: &'b mut [u8; MAX_RESP_SIZE],
637 ) {
638 self.req_resp_buffers.replace((req_buffer, resp_buffer));
639 self.state.set(VirtIOGPUState::Idle);
640
641 let (write_buffer_subslice_range_start, write_buffer_subslice_range_end) =
642 self.write_buffer_subslice_range.get();
643 let write_buffer_subslice_range = Range {
644 start: write_buffer_subslice_range_start,
645 end: write_buffer_subslice_range_end,
646 };
647
648 let mut subslice = SubSliceMut::new(self.write_buffer.take().unwrap());
649 subslice.slice(write_buffer_subslice_range);
650
651 self.client.map(|c| c.write_complete(subslice, Ok(())));
652 }
653
654 fn buffer_chain_callback(
655 &self,
656 buffer_chain: &mut [Option<VirtqueueBuffer<'b>>],
657 _bytes_used: usize,
658 ) {
659 let req_buffer = buffer_chain
662 .get_mut(0)
663 .and_then(|opt_buf| opt_buf.take())
664 .expect("Missing request buffer in VirtIO GPU buffer chain");
665 let resp_buffer = buffer_chain
666 .get_mut(1)
667 .and_then(|opt_buf| opt_buf.take())
668 .expect("Missing request buffer in VirtIO GPU buffer chain");
669
670 let req_array: &mut [u8; MAX_REQ_SIZE] = req_buffer
672 .buf
673 .try_into()
674 .expect("Returned VirtIO GPU request buffer has unexpected size!");
675
676 let resp_length = resp_buffer.len;
677 let resp_array: &mut [u8; MAX_RESP_SIZE] = resp_buffer
678 .buf
679 .try_into()
680 .expect("Returned VirtIO GPU response buffer has unexpected size!");
681
682 if resp_length < CtrlHeader::ENCODED_SIZE {
684 panic!(
685 "VirtIO GPU returned response smaller than the CtrlHeader, \
686 which we cannot parse! Returned bytes: {}",
687 resp_length
688 )
689 }
690
691 let mut resp_iter = resp_array.iter().copied();
695 let ctrl_header = CtrlHeader::from_byte_iter(&mut resp_iter)
696 .expect("Failed to parse VirtIO response CtrlHeader");
697
698 match (self.state.get(), ctrl_header.ctrl_type) {
701 (
702 VirtIOGPUState::InitializingResourceCreate2D,
703 ResourceCreate2DResp::EXPECTED_CTRL_TYPE,
704 ) => {
705 let resp = ResourceCreate2DResp::from_byte_iter_post_ctrl_header(
707 ctrl_header,
708 &mut resp_iter,
709 )
710 .expect("Failed to parse VirtIO GPU ResourceCreate2DResp");
711
712 self.initialize_resource_create_2d_resp(resp, req_array, resp_array);
714 }
715
716 (
717 VirtIOGPUState::InitializingResourceAttachBacking,
718 ResourceAttachBackingResp::EXPECTED_CTRL_TYPE,
719 ) => {
720 let resp = ResourceAttachBackingResp::from_byte_iter_post_ctrl_header(
722 ctrl_header,
723 &mut resp_iter,
724 )
725 .expect("Failed to parse VirtIO GPU ResourceAttachBackingResp");
726
727 self.initialize_resource_attach_backing_resp(resp, req_array, resp_array);
729 }
730
731 (VirtIOGPUState::InitializingSetScanout, SetScanoutResp::EXPECTED_CTRL_TYPE) => {
732 let resp =
734 SetScanoutResp::from_byte_iter_post_ctrl_header(ctrl_header, &mut resp_iter)
735 .expect("Failed to parse VirtIO GPU SetScanoutResp");
736
737 self.initialize_set_scanout_resp(resp, req_array, resp_array);
739 }
740
741 (
742 VirtIOGPUState::InitializingResourceDetachBacking,
743 ResourceDetachBackingResp::EXPECTED_CTRL_TYPE,
744 ) => {
745 let resp = ResourceDetachBackingResp::from_byte_iter_post_ctrl_header(
747 ctrl_header,
748 &mut resp_iter,
749 )
750 .expect("Failed to parse VirtIO GPU ResourceDetachBackingResp");
751
752 self.initialize_resource_detach_backing_resp(resp, req_array, resp_array);
754 }
755
756 (
757 VirtIOGPUState::DrawResourceAttachBacking,
758 ResourceAttachBackingResp::EXPECTED_CTRL_TYPE,
759 ) => {
760 let _resp = ResourceAttachBackingResp::from_byte_iter_post_ctrl_header(
762 ctrl_header,
763 &mut resp_iter,
764 )
765 .expect("Failed to parse VirtIO GPU ResourceAttachBackingResp");
766
767 self.continue_draw_transfer_to_host_2d(req_array, resp_array);
769 }
770
771 (VirtIOGPUState::DrawTransferToHost2D, TransferToHost2DResp::EXPECTED_CTRL_TYPE) => {
772 let _resp = TransferToHost2DResp::from_byte_iter_post_ctrl_header(
774 ctrl_header,
775 &mut resp_iter,
776 )
777 .expect("Failed to parse VirtIO GPU TransferToHost2DResp");
778
779 self.continue_draw_resource_flush(req_array, resp_array);
781 }
782
783 (VirtIOGPUState::DrawResourceFlush, ResourceFlushResp::EXPECTED_CTRL_TYPE) => {
784 let _resp =
786 ResourceFlushResp::from_byte_iter_post_ctrl_header(ctrl_header, &mut resp_iter)
787 .expect("Failed to parse VirtIO GPU ResourceFlushResp");
788
789 self.continue_draw_resource_flushed(req_array, resp_array);
791 }
792
793 (
794 VirtIOGPUState::DrawResourceDetachBacking,
795 ResourceDetachBackingResp::EXPECTED_CTRL_TYPE,
796 ) => {
797 let _resp = ResourceDetachBackingResp::from_byte_iter_post_ctrl_header(
799 ctrl_header,
800 &mut resp_iter,
801 )
802 .expect("Failed to parse VirtIO GPU ResourceDetachBackingResp");
803
804 self.continue_draw_resource_detached_backing(req_array, resp_array);
806 }
807
808 (VirtIOGPUState::Uninitialized, _)
809 | (VirtIOGPUState::InitializingResourceCreate2D, _)
810 | (VirtIOGPUState::InitializingResourceAttachBacking, _)
811 | (VirtIOGPUState::InitializingSetScanout, _)
812 | (VirtIOGPUState::InitializingResourceDetachBacking, _)
813 | (VirtIOGPUState::Idle, _)
814 | (VirtIOGPUState::SettingWriteFrame, _)
815 | (VirtIOGPUState::DrawResourceAttachBacking, _)
816 | (VirtIOGPUState::DrawTransferToHost2D, _)
817 | (VirtIOGPUState::DrawResourceFlush, _)
818 | (VirtIOGPUState::DrawResourceDetachBacking, _) => {
819 panic!(
820 "Received unexpected VirtIO GPU device response. Device \
821 state: {:?}, ctrl hader: {:?}",
822 self.state.get(),
823 ctrl_header
824 );
825 }
826 }
827 }
828}
829
830impl<'a> Screen<'a> for VirtIOGPU<'a, '_> {
    fn set_client(&self, client: &'a dyn ScreenClient) {
        // Overwrites any previously registered client.
        self.client.replace(client);
    }
834
835 fn get_resolution(&self) -> (usize, usize) {
836 (self.width as usize, self.height as usize)
837 }
838
    fn get_pixel_format(&self) -> ScreenPixelFormat {
        // The resource is always created as `A8R8G8B8Unorm` (see
        // `initialize`), which corresponds to ARGB8888.
        ScreenPixelFormat::ARGB_8888
    }
842
    fn get_rotation(&self) -> ScreenRotation {
        // Rotation is not supported; always report an unrotated screen.
        ScreenRotation::Normal
    }
846
847 fn set_write_frame(
848 &self,
849 x: usize,
850 y: usize,
851 width: usize,
852 height: usize,
853 ) -> Result<(), ErrorCode> {
854 let VirtIOGPUState::Idle = self.state.get() else {
856 return Err(ErrorCode::BUSY);
857 };
858
859 let x: u32 = x.try_into().map_err(|_| ErrorCode::INVAL)?;
861 let y: u32 = y.try_into().map_err(|_| ErrorCode::INVAL)?;
862 let width: u32 = width.try_into().map_err(|_| ErrorCode::INVAL)?;
863 let height: u32 = height.try_into().map_err(|_| ErrorCode::INVAL)?;
864
865 let x1 = x.checked_add(width).ok_or(ErrorCode::INVAL)?;
867 let y1 = y.checked_add(height).ok_or(ErrorCode::INVAL)?;
868 if x1 > self.width || y1 > self.height {
869 return Err(ErrorCode::INVAL);
870 }
871
872 self.current_draw_area.set((
875 Rect {
877 x,
878 y,
879 width,
880 height,
881 },
882 (0, 0),
884 (width as usize)
886 .checked_mul(height as usize)
887 .ok_or(ErrorCode::INVAL)?,
888 ));
889
890 self.state.set(VirtIOGPUState::SettingWriteFrame);
893 self.pending_deferred_call_mask
894 .set(deferred_call::PendingDeferredCall::SetWriteFrame);
895 self.deferred_call.set();
896
897 Ok(())
898 }
899
    /// Write pixel data into the previously configured write frame.
    ///
    /// The buffer must contain whole pixels (a multiple of
    /// [`PIXEL_STRIDE`] bytes) and at most as many pixels as remain in the
    /// frame. With `continue_write`, drawing resumes at the offset reached
    /// by the previous write; otherwise it restarts at the frame's
    /// top-left. The draw proceeds asynchronously (attach backing →
    /// transfer → flush → detach backing) and finishes with a
    /// `write_complete` client callback.
    fn write(
        &self,
        buffer: SubSliceMut<'static, u8>,
        continue_write: bool,
    ) -> Result<(), ErrorCode> {
        // Only one operation at a time:
        let VirtIOGPUState::Idle = self.state.get() else {
            return Err(ErrorCode::BUSY);
        };

        // For a fresh write, restart at the top-left of the frame with the
        // full pixel budget; a continued write resumes previous progress:
        let (draw_rect, mut current_draw_offset, mut remaining_pixels) =
            self.current_draw_area.get();
        if !continue_write {
            current_draw_offset = (0, 0);
            remaining_pixels = (draw_rect.width as usize)
                .checked_mul(draw_rect.height as usize)
                .unwrap();
        }
        // NOTE(review): this reset is committed before the size checks
        // below, so a rejected write can still clear the progress of a
        // previous partial write — confirm this is intended.
        self.current_draw_area
            .set((draw_rect, current_draw_offset, remaining_pixels));

        // Only whole pixels can be written ...
        if buffer.len() % PIXEL_STRIDE != 0 {
            return Err(ErrorCode::INVAL);
        }
        // ... and no more than fit the remaining frame area:
        if buffer.len() / PIXEL_STRIDE > remaining_pixels {
            return Err(ErrorCode::SIZE);
        }

        // Remember the client's active subslice (as offsets into the
        // underlying buffer) so it can be reconstructed on completion:
        let write_buffer_subslice_range = buffer.active_range();
        self.write_buffer_subslice_range.set((
            write_buffer_subslice_range.start,
            write_buffer_subslice_range.end,
        ));
        self.write_buffer_offset.set(0);

        let (req_buffer, resp_buffer) = self.req_resp_buffers.take().unwrap();

        // Take the full underlying buffer; the active range is re-applied
        // in `continue_draw_resource_detached_backing`:
        let buffer_slice = buffer.take();

        const ENTRIES: usize = 1;
        // Attach the client's buffer as the resource's backing store.
        // NOTE(review): the entry address is the buffer's memory address
        // passed directly to the device — this assumes the device can
        // address this memory as-is (identity mapping); confirm for the
        // target platform.
        let cmd_resource_attach_backing_req: ResourceAttachBackingReq<{ ENTRIES }> =
            ResourceAttachBackingReq {
                ctrl_header: CtrlHeader {
                    ctrl_type: ResourceAttachBackingReq::<{ ENTRIES }>::CTRL_TYPE,
                    flags: 0,
                    fence_id: 0,
                    ctx_id: 0,
                    padding: 0,
                },
                resource_id: 1,
                nr_entries: ENTRIES as u32,
                entries: [MemEntry {
                    addr: buffer_slice.as_ptr() as u64 + write_buffer_subslice_range.start as u64,
                    length: write_buffer_subslice_range.len() as u32,
                    padding: 0,
                }],
            };
        cmd_resource_attach_backing_req
            .write_to_byte_iter(&mut req_buffer.iter_mut())
            .unwrap();

        // There must be no write buffer held from a previous operation:
        assert!(self.write_buffer.replace(buffer_slice).is_none());

        let mut buffer_chain = [
            Some(VirtqueueBuffer {
                buf: req_buffer,
                len: ResourceAttachBackingReq::<{ ENTRIES }>::ENCODED_SIZE,
                device_writeable: false,
            }),
            Some(VirtqueueBuffer {
                buf: resp_buffer,
                len: ResourceAttachBackingResp::ENCODED_SIZE,
                device_writeable: true,
            }),
        ];
        self.control_queue
            .provide_buffer_chain(&mut buffer_chain)
            .unwrap();

        self.state.set(VirtIOGPUState::DrawResourceAttachBacking);

        Ok(())
    }
1002
    fn set_brightness(&self, _brightness: u16) -> Result<(), ErrorCode> {
        // A virtual display has no controllable backlight; accept and
        // ignore the requested brightness.
        Ok(())
    }
1007
1008 fn set_power(&self, enabled: bool) -> Result<(), ErrorCode> {
1009 if !enabled {
1010 Err(ErrorCode::INVAL)
1011 } else {
1012 Ok(())
1013 }
1014 }
1015
    fn set_invert(&self, _enabled: bool) -> Result<(), ErrorCode> {
        // Color inversion is not implemented by this driver.
        Err(ErrorCode::NOSUPPORT)
    }
1019}
1020
/// Callback from the split virtqueue when the device marks one of our
/// buffer chains as used, i.e. a command response is available.
impl<'b> SplitVirtqueueClient<'b> for VirtIOGPU<'_, 'b> {
    fn buffer_chain_ready(
        &self,
        _queue_number: u32,
        buffer_chain: &mut [Option<VirtqueueBuffer<'b>>],
        bytes_used: usize,
    ) {
        // The queue number is ignored: this driver only ever provides
        // buffers on its single control queue.
        self.buffer_chain_callback(buffer_chain, bytes_used)
    }
}
1031
1032impl DeferredCallClient for VirtIOGPU<'_, '_> {
1033 fn register(&'static self) {
1034 self.deferred_call.register(self);
1035 }
1036
1037 fn handle_deferred_call(&self) {
1038 let calls = self.pending_deferred_call_mask.get_copy_and_clear();
1039 calls.for_each_call(|call| match call {
1040 deferred_call::PendingDeferredCall::SetWriteFrame => {
1041 let VirtIOGPUState::SettingWriteFrame = self.state.get() else {
1042 panic!(
1043 "Unexpected VirtIOGPUState {:?} for SetWriteFrame \
1044 deferred call",
1045 self.state.get()
1046 );
1047 };
1048
1049 self.state.set(VirtIOGPUState::Idle);
1051
1052 self.client.map(|c| c.command_complete(Ok(())));
1054 }
1055 })
1056 }
1057}
1058
impl VirtIODeviceDriver for VirtIOGPU<'_, '_> {
    fn negotiate_features(&self, _offered_features: u64) -> Option<u64> {
        // Accept the device without negotiating any optional features.
        Some(0)
    }

    fn device_type(&self) -> VirtIODeviceType {
        VirtIODeviceType::GPUDevice
    }
}