devices: gpu: add plane info response support

In order to properly send dmabufs over the wayland protocol, accurate
buffer metadata is needed in the guest. This change plumbs information
from minigbm allocations to the guest using a virtio-gpu response.

BUG=875998
TEST=wayland-simple-egl

Change-Id: I5c80d539bc7757c302ad7adf56f5d3b011304617
Reviewed-on: https://chromium-review.googlesource.com/1227054
Commit-Ready: Zach Reizner <zachr@chromium.org>
Tested-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: David Riley <davidriley@chromium.org>
This commit is contained in:
Zach Reizner 2018-09-05 10:23:46 -07:00 committed by chrome-bot
parent 200fd78ff1
commit c5899296c4
2 changed files with 90 additions and 4 deletions

View file

@@ -24,8 +24,9 @@ use super::gpu_renderer::{
};
use super::super::resource_bridge::*;
use super::protocol::GpuResponse;
use super::protocol::{VIRTIO_GPU_CAPSET_VIRGL, VIRTIO_GPU_CAPSET_VIRGL2};
use super::protocol::{
GpuResponse, GpuResponsePlaneInfo, VIRTIO_GPU_CAPSET_VIRGL, VIRTIO_GPU_CAPSET_VIRGL2,
};
const DEFAULT_WIDTH: u32 = 1280;
const DEFAULT_HEIGHT: u32 = 1024;
@@ -798,10 +799,21 @@ impl Backend {
let res = self.renderer.import_resource(create_args, &image);
match res {
Ok(res) => {
let format_modifier = buffer.format_modifier();
let mut plane_info = Vec::with_capacity(buffer.num_planes());
for plane_index in 0..buffer.num_planes() {
plane_info.push(GpuResponsePlaneInfo {
stride: buffer.plane_stride(plane_index),
offset: buffer.plane_offset(plane_index),
});
}
let mut backed =
BackedBuffer::new_renderer_registered(buffer, res, image);
slot.insert(Box::new(backed));
GpuResponse::OkNoData
GpuResponse::OkResourcePlaneInfo {
format_modifier,
plane_info,
}
}
Err(e) => {
error!("failed to import renderer resource: {}", e);

View file

@@ -44,6 +44,7 @@ pub const VIRTIO_GPU_RESP_OK_NODATA: u32 = 0x1100;
pub const VIRTIO_GPU_RESP_OK_DISPLAY_INFO: u32 = 0x1101;
pub const VIRTIO_GPU_RESP_OK_CAPSET_INFO: u32 = 0x1102;
pub const VIRTIO_GPU_RESP_OK_CAPSET: u32 = 0x1103;
pub const VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO: u32 = 0x1104;
/* error responses */
pub const VIRTIO_GPU_RESP_ERR_UNSPEC: u32 = 0x1200;
@@ -79,6 +80,7 @@ pub fn virtio_gpu_cmd_str(cmd: u32) -> &'static str {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO => "VIRTIO_GPU_RESP_OK_DISPLAY_INFO",
VIRTIO_GPU_RESP_OK_CAPSET_INFO => "VIRTIO_GPU_RESP_OK_CAPSET_INFO",
VIRTIO_GPU_RESP_OK_CAPSET => "VIRTIO_GPU_RESP_OK_CAPSET",
VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO => "VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO",
VIRTIO_GPU_RESP_ERR_UNSPEC => "VIRTIO_GPU_RESP_ERR_UNSPEC",
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY => "VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY",
VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID => "VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID",
@@ -417,6 +419,22 @@ pub struct virtio_gpu_resp_capset {
unsafe impl DataInit for virtio_gpu_resp_capset {}
/* VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO */

/// Wire format of the `VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO` response sent
/// to the guest: the DRM format modifier and per-plane strides/offsets of an
/// imported resource.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct virtio_gpu_resp_resource_plane_info {
    // Common virtio-gpu control header; `type_` identifies this response.
    pub hdr: virtio_gpu_ctrl_hdr,
    // Number of valid entries in `strides`/`offsets`; the encoder caps this
    // at PLANE_INFO_MAX_COUNT (4).
    pub count: Le32,
    // Explicit padding between the 32-bit `count` and the 64-bit
    // `format_modifier` — presumably required to match the guest-side C
    // struct layout; TODO confirm against the virtio-gpu header.
    pub padding: Le32,
    // DRM format modifier (tiling/compression layout) as little-endian u64.
    pub format_modifier: Le64,
    // Per-plane row stride in bytes; entries past `count` are zeroed by the
    // encoder.
    pub strides: [Le32; 4],
    // Per-plane byte offset into the buffer; entries past `count` are zeroed
    // by the encoder.
    pub offsets: [Le32; 4],
}

// NOTE(review): presumably `DataInit` requires "plain old data — every byte
// pattern is a valid value", which holds for this #[repr(C)] struct of
// fixed-width little-endian integer fields; confirm against DataInit's
// documented contract.
unsafe impl DataInit for virtio_gpu_resp_resource_plane_info {}

// Maximum planes reportable in one response; matches the fixed-size
// `strides`/`offsets` arrays above.
const PLANE_INFO_MAX_COUNT: usize = 4;
pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
#[derive(Copy, Clone, Debug)]
@@ -567,13 +585,27 @@ impl GpuCommand {
}
}
/// Stride and offset of a single buffer plane, reported back to the guest in
/// a `VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO` response.
///
/// This is a small plain-old-data type, so it derives the full set of cheap
/// traits (`Copy`/`Clone`/`Eq`/`Default`) in addition to the original
/// `Debug`/`PartialEq` — a backward-compatible, purely additive change.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct GpuResponsePlaneInfo {
    /// Row stride of the plane in bytes.
    pub stride: u32,
    /// Byte offset of the plane from the start of the buffer.
    pub offset: u32,
}
/// A response to a `GpuCommand`. These correspond to `VIRTIO_GPU_RESP_*`.
#[derive(Debug, PartialEq)]
pub enum GpuResponse {
OkNoData,
OkDisplayInfo(Vec<(u32, u32)>),
OkCapsetInfo { id: u32, version: u32, size: u32 },
OkCapsetInfo {
id: u32,
version: u32,
size: u32,
},
OkCapset(Vec<u8>),
OkResourcePlaneInfo {
format_modifier: u64,
plane_info: Vec<GpuResponsePlaneInfo>,
},
ErrUnspec,
ErrOutOfMemory,
ErrInvalidScanoutId,
@@ -589,6 +621,8 @@ pub enum GpuResponseEncodeError {
Memory(VolatileMemoryError),
/// More displays than are valid were in a `OkDisplayInfo`.
TooManyDisplays(usize),
/// More planes than are valid were in a `OkResourcePlaneInfo`.
TooManyPlanes(usize),
}
impl From<VolatileMemoryError> for GpuResponseEncodeError {
@@ -647,6 +681,44 @@ impl GpuResponse {
resp_data_slice.copy_from(data);
size_of_val(&hdr) + data.len()
}
GpuResponse::OkResourcePlaneInfo {
format_modifier,
ref plane_info,
} => {
if plane_info.len() > PLANE_INFO_MAX_COUNT {
return Err(GpuResponseEncodeError::TooManyPlanes(plane_info.len()));
}
let mut strides = [Le32::default(); PLANE_INFO_MAX_COUNT];
let mut offsets = [Le32::default(); PLANE_INFO_MAX_COUNT];
for (plane_index, plane) in plane_info.iter().enumerate() {
strides[plane_index] = plane.stride.into();
offsets[plane_index] = plane.offset.into();
}
let plane_info = virtio_gpu_resp_resource_plane_info {
hdr,
count: Le32::from(plane_info.len() as u32),
padding: 0.into(),
format_modifier: format_modifier.into(),
strides,
offsets,
};
match resp.get_ref(0) {
Ok(resp_ref) => {
resp_ref.store(plane_info);
size_of_val(&plane_info)
}
_ => {
// In case there is too little room in the response slice to store the
// entire virtio_gpu_resp_resource_plane_info, convert response to a regular
// VIRTIO_GPU_RESP_OK_NODATA and attempt to return that.
resp.get_ref(0)?.store(virtio_gpu_ctrl_hdr {
type_: Le32::from(VIRTIO_GPU_RESP_OK_NODATA),
..hdr
});
size_of_val(&hdr)
}
}
}
_ => {
resp.get_ref(0)?.store(hdr);
size_of_val(&hdr)
@@ -662,6 +734,7 @@ impl GpuResponse {
GpuResponse::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO,
GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET,
GpuResponse::OkResourcePlaneInfo { .. } => VIRTIO_GPU_RESP_OK_RESOURCE_PLANE_INFO,
GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC,
GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
@@ -678,6 +751,7 @@ impl GpuResponse {
GpuResponse::OkDisplayInfo(_) => true,
GpuResponse::OkCapsetInfo { .. } => true,
GpuResponse::OkCapset(_) => true,
GpuResponse::OkResourcePlaneInfo { .. } => true,
_ => false,
}
}