device: vhost-user: Cross-platform GPU base

Makes the async vhost-user backend cross-platform. The next change will
add the plumbing to turn it on. The plan is to create GpuBackendConfig
and GpuVmmConfig in the broker and pass to the relevant processes.
This way, we can also pass GpuBackendConfig to the main process if we
want to use the original non-vhost-user worker. The config changes will
be included with the plumbing CL that follows.

- Split into a sys module.

- Introduce 'platform_workers' that tracks platform-dependent futures.
  Reasoning: Windows will need to be able to launch more futures at
  runtime due to our input handling, so it's useful to have a vector of
  workers to append to. This way the specific worker function doesn't
  need to leak into the shared file. We can also put the resource
  bridge workers here following the same logic.

- Introduce backend and VMM config structures to pass around.

BUG=b:243061269
TEST=downstream / presubmit

Change-Id: I53458c4dd2cf74b9e6bf5d10819533206e47a683
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3963645
Reviewed-by: Keiichi Watanabe <keiichiw@chromium.org>
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Reviewed-by: Kaiyi Li <kaiyili@google.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Commit-Queue: Idan Raiter <idanr@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
This commit is contained in:
Idan Raiter 2022-10-20 06:35:30 -07:00 committed by crosvm LUCI
parent 479832daef
commit 3c21f8e313
8 changed files with 540 additions and 255 deletions

View file

@ -2,51 +2,37 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
pub mod sys;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use argh::FromArgs;
use base::clone_descriptor;
use base::error;
use base::warn;
use base::Event;
use base::FromRawDescriptor;
use base::SafeDescriptor;
use base::Tube;
use base::UnixSeqpacketListener;
use base::UnlinkUnixSeqpacketListener;
use cros_async::AsyncWrapper;
use cros_async::EventAsync;
use cros_async::Executor;
use cros_async::IoSourceExt;
use futures::future::AbortHandle;
use futures::future::Abortable;
use hypervisor::ProtectionType;
use sync::Mutex;
pub use sys::run_gpu_device;
pub use sys::Options;
use vm_memory::GuestMemory;
use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::message::VhostUserVirtioFeatures;
use crate::virtio;
use crate::virtio::gpu;
use crate::virtio::gpu::ProcessDisplayResult;
use crate::virtio::vhost::user::device::handler::sys::Doorbell;
use crate::virtio::vhost::user::device::handler::VhostBackendReqConnection;
use crate::virtio::vhost::user::device::handler::VhostBackendReqConnectionState;
use crate::virtio::vhost::user::device::handler::VhostUserBackend;
use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
use crate::virtio::vhost::user::device::wl::parse_wayland_sock;
use crate::virtio::DescriptorChain;
use crate::virtio::Gpu;
use crate::virtio::GpuDisplayParameters;
use crate::virtio::GpuParameters;
use crate::virtio::Queue;
use crate::virtio::QueueReader;
use crate::virtio::SharedMemoryRegion;
@ -96,47 +82,6 @@ async fn run_ctrl_queue(
}
}
/// Pumps GPU display events: waits for the display descriptor to become
/// readable, then lets the frontend process pending events. Runs until a
/// wait/processing error occurs or the display requests to close.
async fn run_display(
    display: Box<dyn IoSourceExt<AsyncWrapper<SafeDescriptor>>>,
    state: Rc<RefCell<gpu::Frontend>>,
) {
    loop {
        // Block until the display context has pending events.
        if let Err(e) = display.wait_readable().await {
            error!(
                "Failed to wait for display context to become readable: {}",
                e
            );
            return;
        }

        // Borrow the frontend only while draining events.
        let outcome = state.borrow_mut().process_display();
        match outcome {
            ProcessDisplayResult::Success => continue,
            ProcessDisplayResult::CloseRequested => return,
            ProcessDisplayResult::Error(e) => {
                error!("Failed to process display events: {}", e);
                return;
            }
        }
    }
}
/// Services one resource-bridge connection: waits for the tube to become
/// readable and forwards each request to the GPU frontend, until either
/// waiting or processing fails.
async fn run_resource_bridge(tube: Box<dyn IoSourceExt<Tube>>, state: Rc<RefCell<gpu::Frontend>>) {
    loop {
        // Wait for the peer device to send a request over the bridge.
        match tube.wait_readable().await {
            Ok(_) => {}
            Err(e) => {
                error!(
                    "Failed to wait for resource bridge tube to become readable: {}",
                    e
                );
                return;
            }
        }

        if let Err(e) = state.borrow_mut().process_resource_bridge(tube.as_source()) {
            error!("Failed to process resource bridge: {:#}", e);
            return;
        }
    }
}
struct GpuBackend {
ex: Executor,
gpu: Rc<RefCell<Gpu>>,
@ -144,8 +89,8 @@ struct GpuBackend {
acked_protocol_features: u64,
state: Option<Rc<RefCell<gpu::Frontend>>>,
fence_state: Arc<Mutex<gpu::FenceState>>,
display_worker: Option<AbortHandle>,
workers: [Option<AbortHandle>; MAX_QUEUE_NUM],
queue_workers: [Option<AbortHandle>; MAX_QUEUE_NUM],
platform_workers: Rc<RefCell<Vec<AbortHandle>>>,
backend_req_conn: VhostBackendReqConnectionState,
}
@ -208,7 +153,7 @@ impl VhostUserBackend for GpuBackend {
doorbell: Doorbell,
kick_evt: Event,
) -> anyhow::Result<()> {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
if let Some(handle) = self.queue_workers.get_mut(idx).and_then(Option::take) {
warn!("Starting new queue handler without stopping old handler");
handle.abort();
}
@ -259,40 +204,8 @@ impl VhostUserBackend for GpuBackend {
state
};
// Start handling the resource bridges, if we haven't already.
for bridge in self.resource_bridges.lock().drain(..) {
let tube = self
.ex
.async_from(bridge)
.context("failed to create async tube")?;
self.ex
.spawn_local(run_resource_bridge(tube, state.clone()))
.detach();
}
// Start handling the display, if we haven't already.
if self.display_worker.is_none() {
let display = clone_descriptor(&*state.borrow_mut().display().borrow())
.map(|fd| {
// Safe because we just created this fd.
AsyncWrapper::new(unsafe { SafeDescriptor::from_raw_descriptor(fd) })
})
.context("failed to clone inner WaitContext for gpu display")
.and_then(|ctx| {
self.ex
.async_from(ctx)
.context("failed to create async WaitContext")
})?;
let (handle, registration) = AbortHandle::new_pair();
self.ex
.spawn_local(Abortable::new(
run_display(display, state.clone()),
registration,
))
.detach();
self.display_worker = Some(handle);
}
// Start handling platform-specific workers.
self.start_platform_workers()?;
// Start handling the control queue.
let (handle, registration) = AbortHandle::new_pair();
@ -303,18 +216,18 @@ impl VhostUserBackend for GpuBackend {
))
.detach();
self.workers[idx] = Some(handle);
self.queue_workers[idx] = Some(handle);
Ok(())
}
fn stop_queue(&mut self, idx: usize) {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
if let Some(handle) = self.queue_workers.get_mut(idx).and_then(Option::take) {
handle.abort();
}
}
fn reset(&mut self) {
if let Some(handle) = self.display_worker.take() {
for handle in self.platform_workers.borrow_mut().drain(..) {
handle.abort();
}
@ -335,151 +248,3 @@ impl VhostUserBackend for GpuBackend {
self.backend_req_conn = VhostBackendReqConnectionState::Connected(conn);
}
}
/// Parses a JSON string into `GpuParameters`, converting any parse failure
/// into its display string (argh `from_str_fn` requires a `String` error).
fn gpu_parameters_from_str(input: &str) -> Result<GpuParameters, String> {
    match serde_json::from_str(input) {
        Ok(params) => Ok(params),
        Err(e) => Err(e.to_string()),
    }
}
// Command-line options for the standalone vhost-user GPU device process.
// NOTE: the `///` doc comments on the fields below double as the argh
// `--help` text, so they are user-facing strings — do not edit casually.
#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu")]
pub struct Options {
    #[argh(option, arg_name = "PATH")]
    /// path to bind a listening vhost-user socket
    socket: Option<String>,
    #[argh(option, arg_name = "STRING")]
    /// VFIO-PCI device name (e.g. '0000:00:07.0')
    vfio: Option<String>,
    // Parsed by `parse_wayland_sock` into (name, path) pairs; the unnamed
    // socket (empty name) selects the Wayland display backend.
    #[argh(option, from_str_fn(parse_wayland_sock), arg_name = "PATH[,name=NAME]")]
    /// path to one or more Wayland sockets. The unnamed socket is
    /// used for displaying virtual screens while the named ones are used for IPC
    wayland_sock: Vec<(String, PathBuf)>,
    #[argh(option, arg_name = "PATH")]
    /// path to one or more bridge sockets for communicating with
    /// other graphics devices (wayland, video, etc)
    resource_bridge: Vec<String>,
    #[argh(option, arg_name = "DISPLAY")]
    /// X11 display name to use
    x_display: Option<String>,
    // Parsed from JSON via `gpu_parameters_from_str`; defaults to
    // `GpuParameters::default()` when the flag is absent.
    #[argh(
        option,
        from_str_fn(gpu_parameters_from_str),
        default = "Default::default()",
        arg_name = "JSON"
    )]
    /// a JSON object of virtio-gpu parameters
    params: GpuParameters,
}
/// Entry point for the vhost-user GPU device process.
///
/// Binds the resource-bridge listeners, constructs the virtio-gpu backend,
/// and runs the vhost-user request loop on a local executor until the
/// connection terminates.
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    let Options {
        x_display,
        params: mut gpu_parameters,
        resource_bridge,
        socket,
        vfio,
        wayland_sock,
    } = opts;

    // Keyed by socket name; the unnamed entry ("") selects the Wayland
    // display backend below, the rest become IPC channels.
    let wayland_paths: BTreeMap<_, _> = wayland_sock.into_iter().collect();

    let resource_bridge_listeners = resource_bridge
        .into_iter()
        .map(|p| {
            UnixSeqpacketListener::bind(&p)
                .map(UnlinkUnixSeqpacketListener)
                .with_context(|| format!("failed to bind socket at path {}", p))
        })
        .collect::<anyhow::Result<Vec<_>>>()?;

    // Guarantee at least one display so the guest always sees a screen.
    if gpu_parameters.display_params.is_empty() {
        gpu_parameters
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let ex = Executor::new().context("failed to create executor")?;

    // We don't know the order in which other devices are going to connect to the resource bridges
    // so start listening for all of them on separate threads. Any devices that connect after the
    // gpu device starts its queues will not have its resource bridges processed. In practice this
    // should be fine since the devices that use the resource bridge always try to connect to the
    // gpu device before handling messages from the VM.
    let resource_bridges = Arc::new(Mutex::new(Vec::with_capacity(
        resource_bridge_listeners.len(),
    )));
    for listener in resource_bridge_listeners {
        let resource_bridges = Arc::clone(&resource_bridges);
        // Each accept() blocks, so it runs on the executor's blocking pool.
        ex.spawn_blocking(move || match listener.accept() {
            Ok(stream) => resource_bridges
                .lock()
                .push(Tube::new_from_unix_seqpacket(stream)),
            Err(e) => {
                let path = listener
                    .path()
                    .unwrap_or_else(|_| PathBuf::from("{unknown}"));
                error!(
                    "Failed to accept resource bridge connection for socket {}: {}",
                    path.display(),
                    e
                );
            }
        })
        .detach();
    }

    // TODO(b/232344535): Read side of the tube is ignored currently.
    // Complete the implementation by polling `exit_evt_rdtube` and
    // kill the sibling VM.
    let (exit_evt_wrtube, _) =
        Tube::directional_pair().context("failed to create vm event tube")?;

    // The receive half is dropped; only the write end is handed to the GPU.
    let (gpu_control_tube, _) = Tube::pair().context("failed to create gpu control tube")?;

    // Backend preference order: Wayland (if an unnamed socket was supplied,
    // inserted at the front), then X11, then the stub backend.
    let mut display_backends = vec![
        virtio::DisplayBackend::X(x_display),
        virtio::DisplayBackend::Stub,
    ];
    if let Some(p) = wayland_paths.get("") {
        display_backends.insert(0, virtio::DisplayBackend::Wayland(Some(p.to_owned())));
    }

    // These are only used when there is an input device.
    let event_devices = Vec::new();

    // The regular gpu device sets this to true when sandboxing is enabled. Assume that we
    // are always sandboxed.
    let external_blob = true;
    let base_features = virtio::base_features(ProtectionType::Unprotected);
    let channels = wayland_paths;

    let listener = VhostUserListener::new_from_socket_or_vfio(&socket, &vfio, MAX_QUEUE_NUM, None)?;

    let gpu = Rc::new(RefCell::new(Gpu::new(
        exit_evt_wrtube,
        gpu_control_tube,
        Vec::new(), // resource_bridges, handled separately by us
        display_backends,
        &gpu_parameters,
        #[cfg(feature = "virgl_renderer_next")]
        /* render_server_fd= */
        None,
        event_devices,
        external_blob,
        base_features,
        channels,
    )));

    let backend = Box::new(GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges,
        acked_protocol_features: 0,
        state: None,
        fence_state: Default::default(),
        display_worker: None,
        workers: Default::default(),
        backend_req_conn: VhostBackendReqConnectionState::NoConnection,
    });

    // Blocks here until the backend's listener future completes; the inner
    // result becomes this function's return value.
    ex.run_until(listener.run_backend(backend, &ex))?
}

View file

@ -0,0 +1,16 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Select the platform-specific GPU vhost-user implementation at compile time
// and expose it under the common name `platform`.
cfg_if::cfg_if! {
    if #[cfg(unix)] {
        pub mod unix;
        use unix as platform;
    } else if #[cfg(windows)] {
        pub mod windows;
        use windows as platform;
    }
}

// Cross-platform aliases: callers use `sys::run_gpu_device` / `sys::Options`
// regardless of the target OS.
pub use platform::run_gpu_device;
pub use platform::Options;

View file

@ -0,0 +1,274 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use anyhow::Context;
use argh::FromArgs;
use base::clone_descriptor;
use base::error;
use base::FromRawDescriptor;
use base::SafeDescriptor;
use base::Tube;
use base::UnixSeqpacketListener;
use base::UnlinkUnixSeqpacketListener;
use cros_async::AsyncWrapper;
use cros_async::Executor;
use cros_async::IoSourceExt;
use futures::future::AbortHandle;
use futures::future::Abortable;
use hypervisor::ProtectionType;
use sync::Mutex;
use crate::virtio;
use crate::virtio::gpu;
use crate::virtio::gpu::ProcessDisplayResult;
use crate::virtio::vhost::user::device::gpu::GpuBackend;
use crate::virtio::vhost::user::device::gpu::MAX_QUEUE_NUM;
use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
use crate::virtio::vhost::user::device::wl::parse_wayland_sock;
use crate::virtio::vhost::user::VhostBackendReqConnectionState;
use crate::virtio::Gpu;
use crate::virtio::GpuDisplayParameters;
use crate::virtio::GpuParameters;
/// Drives the GPU display on unix: waits on the cloned display descriptor,
/// then has the frontend drain pending display events. Exits when waiting
/// fails, processing fails, or the display requests to close.
async fn run_display(
    display: Box<dyn IoSourceExt<AsyncWrapper<SafeDescriptor>>>,
    state: Rc<RefCell<gpu::Frontend>>,
) {
    loop {
        match display.wait_readable().await {
            Ok(_) => {}
            Err(e) => {
                error!(
                    "Failed to wait for display context to become readable: {}",
                    e
                );
                return;
            }
        }

        // Process events while holding the frontend borrow as briefly as
        // possible.
        let outcome = state.borrow_mut().process_display();
        match outcome {
            ProcessDisplayResult::CloseRequested => return,
            ProcessDisplayResult::Error(e) => {
                error!("Failed to process display events: {}", e);
                return;
            }
            ProcessDisplayResult::Success => {}
        }
    }
}
/// Forwards requests from one resource-bridge tube to the GPU frontend
/// until the first wait or processing error.
async fn run_resource_bridge(tube: Box<dyn IoSourceExt<Tube>>, state: Rc<RefCell<gpu::Frontend>>) {
    loop {
        let wait_result = tube.wait_readable().await;
        if let Err(e) = wait_result {
            error!(
                "Failed to wait for resource bridge tube to become readable: {}",
                e
            );
            return;
        }

        let process_result = state.borrow_mut().process_resource_bridge(tube.as_source());
        if let Err(e) = process_result {
            error!("Failed to process resource bridge: {:#}", e);
            return;
        }
    }
}
impl GpuBackend {
    /// Spawns the unix-specific worker futures: one task per pending
    /// resource-bridge tube plus a single display-event task. Each task's
    /// `AbortHandle` is recorded in `platform_workers` so `reset()` can stop
    /// them later.
    ///
    /// # Errors
    ///
    /// Fails if the frontend state has not been set yet, or if a descriptor
    /// cannot be registered with the executor.
    pub fn start_platform_workers(&mut self) -> anyhow::Result<()> {
        let state = self
            .state
            .as_ref()
            .context("frontend state wasn't set")?
            .clone();

        // Start handling the resource bridges. Draining the vector means a
        // second call only spawns tasks for bridges queued since.
        for bridge in self.resource_bridges.lock().drain(..) {
            let tube = self
                .ex
                .async_from(bridge)
                .context("failed to create async tube")?;
            let (handle, registration) = AbortHandle::new_pair();
            self.ex
                .spawn_local(Abortable::new(
                    run_resource_bridge(tube, state.clone()),
                    registration,
                ))
                .detach();
            self.platform_workers.borrow_mut().push(handle);
        }

        // Start handling the display.
        let display = clone_descriptor(&*state.borrow_mut().display().borrow())
            .map(|fd| {
                // Safe because we just created this fd.
                AsyncWrapper::new(unsafe { SafeDescriptor::from_raw_descriptor(fd) })
            })
            .context("failed to clone inner WaitContext for gpu display")
            .and_then(|ctx| {
                self.ex
                    .async_from(ctx)
                    .context("failed to create async WaitContext")
            })?;
        let (handle, registration) = AbortHandle::new_pair();
        self.ex
            .spawn_local(Abortable::new(run_display(display, state), registration))
            .detach();
        self.platform_workers.borrow_mut().push(handle);

        // Tail expression instead of the original `return Ok(());`
        // (clippy::needless_return).
        Ok(())
    }
}
/// Deserializes `GpuParameters` from a JSON string; errors are stringified
/// because argh's `from_str_fn` expects a `String` error type.
fn gpu_parameters_from_str(input: &str) -> Result<GpuParameters, String> {
    let parsed: Result<GpuParameters, _> = serde_json::from_str(input);
    parsed.map_err(|err| err.to_string())
}
// Command-line options for the unix vhost-user GPU device.
// NOTE: the `///` doc comments on the fields below double as the argh
// `--help` text, so they are user-facing strings — do not edit casually.
#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu")]
pub struct Options {
    #[argh(option, arg_name = "PATH")]
    /// path to bind a listening vhost-user socket
    socket: Option<String>,
    #[argh(option, arg_name = "STRING")]
    /// VFIO-PCI device name (e.g. '0000:00:07.0')
    vfio: Option<String>,
    // Parsed by `parse_wayland_sock` into (name, path) pairs; the unnamed
    // socket (empty name) selects the Wayland display backend.
    #[argh(option, from_str_fn(parse_wayland_sock), arg_name = "PATH[,name=NAME]")]
    /// path to one or more Wayland sockets. The unnamed socket is
    /// used for displaying virtual screens while the named ones are used for IPC
    wayland_sock: Vec<(String, PathBuf)>,
    #[argh(option, arg_name = "PATH")]
    /// path to one or more bridge sockets for communicating with
    /// other graphics devices (wayland, video, etc)
    resource_bridge: Vec<String>,
    #[argh(option, arg_name = "DISPLAY")]
    /// X11 display name to use
    x_display: Option<String>,
    // Parsed from JSON via `gpu_parameters_from_str`; defaults to
    // `GpuParameters::default()` when the flag is absent.
    #[argh(
        option,
        from_str_fn(gpu_parameters_from_str),
        default = "Default::default()",
        arg_name = "JSON"
    )]
    /// a JSON object of virtio-gpu parameters
    params: GpuParameters,
}
/// Entry point for the unix vhost-user GPU device process.
///
/// Binds the resource-bridge listeners, constructs the virtio-gpu backend,
/// and runs the vhost-user request loop on a local executor until the
/// connection terminates.
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    let Options {
        x_display,
        params: mut gpu_parameters,
        resource_bridge,
        socket,
        vfio,
        wayland_sock,
    } = opts;

    // Keyed by socket name; the unnamed entry ("") selects the Wayland
    // display backend below, the rest become IPC channels.
    let wayland_paths: BTreeMap<_, _> = wayland_sock.into_iter().collect();

    let resource_bridge_listeners = resource_bridge
        .into_iter()
        .map(|p| {
            UnixSeqpacketListener::bind(&p)
                .map(UnlinkUnixSeqpacketListener)
                .with_context(|| format!("failed to bind socket at path {}", p))
        })
        .collect::<anyhow::Result<Vec<_>>>()?;

    // Guarantee at least one display so the guest always sees a screen.
    if gpu_parameters.display_params.is_empty() {
        gpu_parameters
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let ex = Executor::new().context("failed to create executor")?;

    // We don't know the order in which other devices are going to connect to the resource bridges
    // so start listening for all of them on separate threads. Any devices that connect after the
    // gpu device starts its queues will not have its resource bridges processed. In practice this
    // should be fine since the devices that use the resource bridge always try to connect to the
    // gpu device before handling messages from the VM.
    let resource_bridges = Arc::new(Mutex::new(Vec::with_capacity(
        resource_bridge_listeners.len(),
    )));
    for listener in resource_bridge_listeners {
        let resource_bridges = Arc::clone(&resource_bridges);
        // Each accept() blocks, so it runs on the executor's blocking pool.
        ex.spawn_blocking(move || match listener.accept() {
            Ok(stream) => resource_bridges
                .lock()
                .push(Tube::new_from_unix_seqpacket(stream)),
            Err(e) => {
                let path = listener
                    .path()
                    .unwrap_or_else(|_| PathBuf::from("{unknown}"));
                error!(
                    "Failed to accept resource bridge connection for socket {}: {}",
                    path.display(),
                    e
                );
            }
        })
        .detach();
    }

    // TODO(b/232344535): Read side of the tube is ignored currently.
    // Complete the implementation by polling `exit_evt_rdtube` and
    // kill the sibling VM.
    let (exit_evt_wrtube, _) =
        Tube::directional_pair().context("failed to create vm event tube")?;

    // The receive half is dropped; only the write end is handed to the GPU.
    let (gpu_control_tube, _) = Tube::pair().context("failed to create gpu control tube")?;

    // Backend preference order: Wayland (if an unnamed socket was supplied,
    // inserted at the front), then X11, then the stub backend.
    let mut display_backends = vec![
        virtio::DisplayBackend::X(x_display),
        virtio::DisplayBackend::Stub,
    ];
    if let Some(p) = wayland_paths.get("") {
        display_backends.insert(0, virtio::DisplayBackend::Wayland(Some(p.to_owned())));
    }

    // These are only used when there is an input device.
    let event_devices = Vec::new();

    // The regular gpu device sets this to true when sandboxing is enabled. Assume that we
    // are always sandboxed.
    let external_blob = true;
    let base_features = virtio::base_features(ProtectionType::Unprotected);
    let channels = wayland_paths;

    let listener = VhostUserListener::new_from_socket_or_vfio(&socket, &vfio, MAX_QUEUE_NUM, None)?;

    let gpu = Rc::new(RefCell::new(Gpu::new(
        exit_evt_wrtube,
        gpu_control_tube,
        Vec::new(), // resource_bridges, handled separately by us
        display_backends,
        &gpu_parameters,
        #[cfg(feature = "virgl_renderer_next")]
        /* render_server_fd= */
        None,
        event_devices,
        external_blob,
        base_features,
        channels,
    )));

    let backend = Box::new(GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges,
        acked_protocol_features: 0,
        state: None,
        fence_state: Default::default(),
        queue_workers: Default::default(),
        platform_workers: Default::default(),
        backend_req_conn: VhostBackendReqConnectionState::NoConnection,
    });

    // Blocks here until the backend's listener future completes; the inner
    // result becomes this function's return value.
    ex.run_until(listener.run_backend(backend, &ex))?
}

View file

@ -0,0 +1,225 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use anyhow::bail;
use anyhow::Context;
use argh::FromArgs;
use base::error;
use base::info;
use base::Event;
use base::FromRawDescriptor;
use base::RawDescriptor;
use base::SafeDescriptor;
use base::SendTube;
use base::StreamChannel;
use base::Tube;
use broker_ipc::common_child_setup;
use broker_ipc::CommonChildStartupArgs;
use cros_async::AsyncWrapper;
use cros_async::EventAsync;
use cros_async::Executor;
use cros_async::IoSourceExt;
use futures::future::AbortHandle;
use futures::future::Abortable;
use gpu_display::EventDevice;
use gpu_display::EventDeviceKind;
use hypervisor::ProtectionType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
use tube_transporter::TubeToken;
use crate::virtio;
use crate::virtio::gpu;
use crate::virtio::gpu::ProcessDisplayResult;
use crate::virtio::vhost::user::device::gpu::GpuBackend;
use crate::virtio::vhost::user::device::handler::sys::windows::read_from_tube_transporter;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::vhost::user::VhostBackendReqConnectionState;
use crate::virtio::Gpu;
use crate::virtio::GpuDisplayParameters;
use crate::virtio::GpuParameters;
/// Drives the GPU display on Windows: waits on the cloned display event,
/// then lets the frontend drain pending display events. Exits when waiting
/// fails, processing fails, or the display requests to close.
async fn run_display(display: EventAsync, state: Rc<RefCell<gpu::Frontend>>) {
    loop {
        if let Err(e) = display.next_val().await {
            error!(
                "Failed to wait for display context to become readable: {}",
                e
            );
            return;
        }

        let outcome = state.borrow_mut().process_display();
        match outcome {
            ProcessDisplayResult::Success => continue,
            ProcessDisplayResult::CloseRequested => return,
            ProcessDisplayResult::Error(e) => {
                error!("Failed to process display events: {}", e);
                return;
            }
        }
    }
}
impl GpuBackend {
    /// Spawns the Windows-specific worker futures — currently a single
    /// display-event task. Its `AbortHandle` is recorded in
    /// `platform_workers` so `reset()` can stop it later.
    ///
    /// # Errors
    ///
    /// Fails if the frontend state has not been set yet, or if the display
    /// event cannot be cloned for the executor.
    pub fn start_platform_workers(&mut self) -> anyhow::Result<()> {
        let state = self
            .state
            .as_ref()
            .context("frontend state wasn't set")?
            .clone();

        // Start handling the display.
        // Safe because the raw descriptor is valid, and an event.
        let display = unsafe {
            EventAsync::clone_raw_without_reset(&*state.borrow_mut().display().borrow(), &self.ex)
        }
        .context("failed to clone inner WaitContext for gpu display")?;
        let (handle, registration) = AbortHandle::new_pair();
        self.ex
            .spawn_local(Abortable::new(run_display(display, state), registration))
            .detach();
        self.platform_workers.borrow_mut().push(handle);

        // Tail expression instead of the original `return Ok(());`
        // (clippy::needless_return).
        Ok(())
    }
}
// Command-line options for the Windows vhost-user GPU device. Unlike unix,
// all configuration arrives over the Tube Transporter pipe rather than on
// the command line.
// NOTE: the argh `description` strings are user-facing `--help` text.
#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu", description = "")]
pub struct Options {
    // Raw handle value for the Tube Transporter pipe; reinterpreted as a
    // RawDescriptor in `run_gpu_device`.
    #[argh(
        option,
        description = "pipe handle end for Tube Transporter",
        arg_name = "HANDLE"
    )]
    bootstrap: usize,
}
/// Main process end for a GPU device.
///
/// Serialized across the broker/VMM process boundary (hence the serde
/// derives). Presumably constructed by the broker and consumed by the main
/// process — TODO(review): confirm against the broker plumbing.
#[derive(Deserialize, Serialize)]
pub struct GpuVmmConfig {
    // Tube for setting up the vhost-user connection.
    pub vhost_user_tube: Option<Tube>,
    // Tube for service.
    pub gpu_main_service_tube: Option<Tube>,
    // Pipes to receive input events on, one per event-device kind.
    pub input_event_device_pipes: VecDeque<(EventDeviceKind, StreamChannel)>,
}
/// Config arguments passed through the bootstrap Tube from the broker to the Gpu backend
/// process.
///
/// Received in `run_gpu_device` via `bootstrap_tube.recv()`; the serde
/// derives make it transferable over the Tube.
#[derive(Deserialize, Serialize)]
pub struct GpuBackendConfig {
    // An event for an incoming exit request.
    pub exit_event: Event,
    // A tube to send an exit request.
    pub exit_evt_wrtube: SendTube,
    // Event devices to send input events to.
    pub event_devices: Vec<EventDevice>,
    // Tube for service.
    pub gpu_device_service_tube: Tube,
    // GPU parameters (also selects the display parameters used at startup).
    pub params: GpuParameters,
}
/// Entry point for the Windows vhost-user GPU device process.
///
/// Receives its configuration over the Tube Transporter pipe identified by
/// `opts.bootstrap`, builds the virtio-gpu backend, and runs the vhost-user
/// request loop until the exit event fires or an error occurs.
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    cros_tracing::init();

    let raw_transport_tube = opts.bootstrap as RawDescriptor;

    let mut tubes = read_from_tube_transporter(raw_transport_tube)?;

    let bootstrap_tube = tubes.get_tube(TubeToken::Bootstrap)?;
    let vhost_user_tube = tubes.get_tube(TubeToken::VhostUser)?;

    // Common child-process setup (logging etc.); the guard must stay alive
    // for the lifetime of this process.
    let startup_args: CommonChildStartupArgs = bootstrap_tube.recv::<CommonChildStartupArgs>()?;
    let _child_cleanup = common_child_setup(startup_args)?;

    let mut config: GpuBackendConfig = bootstrap_tube
        .recv()
        .context("failed to parse GPU backend config from bootstrap tube")?;

    // Guarantee at least one display so indexing display_params[0] below is
    // safe.
    if config.params.display_params.is_empty() {
        config
            .params
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let display_backends = vec![virtio::DisplayBackend::WinApi(
        (&config.params.display_params[0]).into(),
    )];

    let wndproc_thread = virtio::gpu::start_wndproc_thread(
        #[cfg(feature = "kiwi")]
        config.params.display_params[0]
            .gpu_main_display_tube
            .clone(),
        #[cfg(not(feature = "kiwi"))]
        None,
    )
    .context("failed to start wndproc_thread")?;

    // Required to share memory across processes.
    let external_blob = true;
    let base_features = virtio::base_features(ProtectionType::Unprotected);

    let gpu = Rc::new(RefCell::new(Gpu::new(
        config.exit_evt_wrtube,
        /*resource_bridges=*/ Vec::new(),
        display_backends,
        &config.params,
        #[cfg(feature = "virgl_renderer_next")]
        /*render_server_fd=*/
        None,
        config.event_devices,
        external_blob,
        base_features,
        /*channels=*/ Default::default(),
        #[cfg(feature = "kiwi")]
        Some(config.gpu_device_service_tube),
        wndproc_thread,
    )));

    let ex = Executor::new().context("failed to create executor")?;

    let backend = Box::new(GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges: Default::default(),
        acked_protocol_features: 0,
        state: None,
        fence_state: Default::default(),
        queue_workers: Default::default(),
        platform_workers: Default::default(),
        backend_req_conn: VhostBackendReqConnectionState::NoConnection,
    });

    let handler = DeviceRequestHandler::new(backend);

    // TODO(b/213170185): Uncomment once sandbox is upstreamed.
    // if sandbox::is_sandbox_target() {
    //     sandbox::TargetServices::get()
    //         .expect("failed to get target services")
    //         .unwrap()
    //         .lower_token();
    // }

    info!("vhost-user gpu device ready, starting run loop...");
    if let Err(e) = ex.run_until(handler.run(vhost_user_tube, config.exit_event, &ex)) {
        bail!("error occurred: {}", e);
    }
    Ok(())
}

View file

@ -3,12 +3,18 @@
// found in the LICENSE file.
mod block;
#[cfg(feature = "gpu")]
mod gpu;
mod handler;
mod listener;
pub use block::run_block_device;
pub use block::Options as BlockOptions;
use cros_async::Executor;
#[cfg(feature = "gpu")]
pub use gpu::run_gpu_device;
#[cfg(feature = "gpu")]
pub use gpu::Options as GpuOptions;
pub use handler::VhostBackendReqConnectionState;
pub use handler::VhostUserBackend;
pub use listener::sys::VhostUserListener;
@ -16,8 +22,6 @@ pub use listener::VhostUserListenerTrait;
cfg_if::cfg_if! {
if #[cfg(unix)] {
#[cfg(feature = "gpu")]
mod gpu;
mod console;
#[cfg(feature = "audio")]
mod snd;
@ -34,8 +38,6 @@ cfg_if::cfg_if! {
pub use snd::{run_snd_device, Options as SndOptions};
pub use fs::{run_fs_device, Options as FsOptions};
pub use net::{run_net_device, Options as NetOptions};
#[cfg(feature = "gpu")]
pub use gpu::{run_gpu_device, Options as GpuOptions};
} else if #[cfg(windows)] {
#[cfg(feature = "slirp")]
mod net;

View file

@ -3,6 +3,7 @@
// found in the LICENSE file.
mod block;
mod gpu;
mod handler;
mod virtio_device;
@ -15,13 +16,13 @@ use vmm_vhost::message::VhostUserProtocolFeatures;
use vmm_vhost::Error as VhostError;
pub use self::block::*;
pub use self::gpu::*;
pub use self::handler::VhostUserHandler;
cfg_if::cfg_if! {
if #[cfg(unix)] {
mod console;
mod fs;
mod gpu;
mod mac80211_hwsim;
mod net;
mod snd;
@ -34,7 +35,6 @@ cfg_if::cfg_if! {
pub use self::wl::*;
pub use self::net::*;
pub use self::mac80211_hwsim::*;
pub use self::gpu::*;
pub use self::console::*;
pub use self::fs::*;
pub use self::video::*;

View file

@ -16,12 +16,12 @@ linux_input_sys = { path = "../linux_input_sys" }
remain = "*"
thiserror = "*"
cfg-if = "*"
serde = { version = "1", features = [ "derive" ] }
[target.'cfg(windows)'.dependencies]
anyhow = "*"
metrics = { path = "../metrics" }
num-traits = "*"
serde = { version = "1", features = [ "derive" ] }
winapi = "*"
win_util = { path = "../win_util" }
sync = { path = "../common/sync" }

View file

@ -17,6 +17,8 @@ use base::StreamChannel;
use data_model::DataInit;
use linux_input_sys::virtio_input_event;
use linux_input_sys::InputEventDecoder;
use serde::Deserialize;
use serde::Serialize;
const EVENT_SIZE: usize = virtio_input_event::SIZE;
const EVENT_BUFFER_LEN_MAX: usize = 64 * EVENT_SIZE;
@ -35,7 +37,7 @@ const EVENT_BUFFER_LEN_MAX: usize = 64 * EVENT_SIZE;
// }
// }
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum EventDeviceKind {
/// Produces relative mouse motions, wheel, and button clicks while the real mouse is captured.
Mouse,
@ -46,6 +48,7 @@ pub enum EventDeviceKind {
}
/// Encapsulates a virtual event device, such as a mouse or keyboard
#[derive(Deserialize, Serialize)]
pub struct EventDevice {
kind: EventDeviceKind,
event_buffer: VecDeque<u8>,