devices: virtio: iommu: add socket between main process and virtio-iommu

A tube channel is needed for communication between the main process and
the virtio-iommu worker thread, e.g. to pass the vfio container file
descriptor of a hot-pluggable device to the virtio-iommu backend, which
is in charge of mapping/unmapping for that endpoint.

BUG=b:185084350
TEST=Boot a guest with no error

Change-Id: Ib5061e795227040ca400bc9a92df84a27cd26438
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3301703
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
commit dbba029d13 (parent 0ae13d221e)
Author: Haiwei Li, 2022-02-17 13:53:29 +08:00 (committed by Commit Bot)
4 changed files with 72 additions and 26 deletions
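The change boils down to a small ownership pattern: the main process creates a connected pair of tubes, keeps one end, and hands the other end to the virtio-iommu device, whose worker thread then waits on it for typed requests. A minimal, self-contained sketch of that pattern follows, using a standard-library channel as a stand-in for crosvm's Tube; the HostRequest type and its variant are purely illustrative (the real VirtioIOMMURequest enum is still empty in this change).

use std::sync::mpsc;
use std::thread;

// Stand-in for vm_control::VirtioIOMMURequest, which this commit leaves empty;
// a real variant (e.g. carrying a hot-plugged device's vfio container) would
// be added later.
#[derive(Debug)]
enum HostRequest {
    AttachEndpoint(u32),
}

fn main() {
    // Analogue of Tube::pair(): one end stays with the main process, the
    // other end is moved into the virtio-iommu worker thread.
    let (host_tube, device_tube) = mpsc::channel::<HostRequest>();

    let worker = thread::spawn(move || {
        // Analogue of Worker::handle_command_tube(): wait for the next
        // request from the host and act on it.
        while let Ok(req) = device_tube.recv() {
            println!("virtio-iommu worker received {:?}", req);
        }
    });

    // The main process (e.g. on device hot-plug) sends a request down its end.
    host_tube.send(HostRequest::AttachEndpoint(8)).unwrap();
    drop(host_tube); // closing the host end lets the worker loop exit

    worker.join().unwrap();
}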


@@ -22,6 +22,7 @@ use futures::{select, FutureExt};
use remain::sorted;
use sync::Mutex;
use thiserror::Error;
use vm_control::VirtioIOMMURequest;
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
use crate::pci::PciAddress;
@@ -123,6 +124,10 @@ pub enum IommuError {
Tube(TubeError),
#[error("unexpected descriptor error")]
UnexpectedDescriptor,
#[error("failed to receive virtio-iommu control request: {0}")]
VirtioIOMMUReqError(TubeError),
#[error("failed to send virtio-iommu control response: {0}")]
VirtioIOMMUResponseError(TubeError),
#[error("failed to wait for events: {0}")]
WaitError(SysError),
#[error("write buffer length too small")]
@@ -471,8 +476,23 @@ impl Worker {
}
}
// Async task that handles messages from the host
pub async fn handle_command_tube(command_tube: &AsyncTube) -> Result<()> {
loop {
match command_tube.next::<VirtioIOMMURequest>().await {
Ok(_) => {
// To-Do: handle the requests from virtio-iommu tube
}
Err(e) => {
return Err(IommuError::VirtioIOMMUReqError(e));
}
}
}
}
fn run(
&mut self,
iommu_device_tube: Tube,
mut queues: Vec<Queue>,
queue_evts: Vec<Event>,
kill_evt: Event,
@@ -517,12 +537,18 @@
let f_handle_translate_request =
handle_translate_request(&endpoints, request_tube, response_tubes);
let f_request = self.request_queue(req_queue, req_evt, interrupt_ref, &endpoints);
let command_tube = iommu_device_tube.into_async_tube(&ex).unwrap();
// Future to handle command messages from host, such as passing vfio containers.
let f_cmd = Self::handle_command_tube(&command_tube);
let done = async {
select! {
res = f_request.fuse() => res.context("error in handling request queue"),
res = f_resample.fuse() => res.context("error in handle_irq_resample"),
res = f_kill.fuse() => res.context("error in await_and_exit"),
res = f_handle_translate_request.fuse() => res.context("error in handle_translate_request"),
res = f_cmd.fuse() => res.context("error in handling host request"),
}
};
match ex.run_until(done) {
@@ -584,6 +610,7 @@ pub struct Iommu {
endpoints: BTreeMap<u32, Arc<Mutex<Box<dyn MemoryMapperTrait>>>>,
translate_response_senders: Option<BTreeMap<u32, Tube>>,
translate_request_rx: Option<Tube>,
iommu_device_tube: Option<Tube>,
}
impl Iommu {
@@ -594,6 +621,7 @@ impl Iommu {
phys_max_addr: u64,
translate_response_senders: Option<BTreeMap<u32, Tube>>,
translate_request_rx: Option<Tube>,
iommu_device_tube: Option<Tube>,
) -> SysResult<Iommu> {
let mut page_size_mask = !0_u64;
for (_, container) in endpoints.iter() {
@@ -637,6 +665,7 @@ impl Iommu {
endpoints,
translate_response_senders,
translate_request_rx,
iommu_device_tube,
})
}
}
@@ -718,33 +747,42 @@ impl VirtioDevice for Iommu {
let translate_response_senders = self.translate_response_senders.take();
let translate_request_rx = self.translate_request_rx.take();
let worker_result = thread::Builder::new()
.name("virtio_iommu".to_string())
.spawn(move || {
let mut worker = Worker {
mem,
page_mask,
endpoint_map: BTreeMap::new(),
domain_map: BTreeMap::new(),
};
let result = worker.run(
queues,
queue_evts,
kill_evt,
interrupt,
eps,
translate_response_senders,
translate_request_rx,
);
if let Err(e) = result {
error!("virtio-iommu worker thread exited with error: {}", e);
}
worker
});
match self.iommu_device_tube.take() {
Some(iommu_device_tube) => {
let worker_result = thread::Builder::new()
.name("virtio_iommu".to_string())
.spawn(move || {
let mut worker = Worker {
mem,
page_mask,
endpoint_map: BTreeMap::new(),
domain_map: BTreeMap::new(),
};
let result = worker.run(
iommu_device_tube,
queues,
queue_evts,
kill_evt,
interrupt,
eps,
translate_response_senders,
translate_request_rx,
);
if let Err(e) = result {
error!("virtio-iommu worker thread exited with error: {}", e);
}
worker
});
match worker_result {
Err(e) => error!("failed to spawn virtio_iommu worker thread: {}", e),
Ok(join_handle) => self.worker_thread = Some(join_handle),
match worker_result {
Err(e) => error!("failed to spawn virtio_iommu worker thread: {}", e),
Ok(join_handle) => self.worker_thread = Some(join_handle),
}
}
None => {
error!("failed to start virtio-iommu worker: No control tube");
return;
}
}
}
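On the worker side, the new command future becomes one more arm of the existing select!, so a host request, a request-queue event, an IRQ resample, or a kill event all wake the same executor loop. Below is a condensed analogue of that multiplexing using only the futures crate; worker_loop and the unbounded channels stand in for crosvm's AsyncTube, Executor, and event futures and are not part of this commit.

use futures::{channel::mpsc, select, FutureExt, StreamExt};

// One branch for host commands (the new command tube), one branch standing
// in for the kill event; whichever completes first is handled, then we loop.
async fn worker_loop(mut commands: mpsc::UnboundedReceiver<u32>, mut kill: mpsc::UnboundedReceiver<()>) {
    loop {
        select! {
            cmd = commands.next().fuse() => match cmd {
                Some(c) => println!("host request: {}", c),
                None => break, // host end of the channel was dropped
            },
            _ = kill.next().fuse() => break, // kill event fired
        }
    }
}

fn main() {
    let (cmd_tx, cmd_rx) = mpsc::unbounded();
    let (_kill_tx, kill_rx) = mpsc::unbounded::<()>();
    cmd_tx.unbounded_send(7).unwrap();
    drop(cmd_tx); // lets worker_loop exit after handling the queued request
    futures::executor::block_on(worker_loop(cmd_rx, kill_rx));
}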


@@ -996,6 +996,7 @@ pub fn create_iommu_device(
endpoints: BTreeMap<u32, Arc<Mutex<Box<dyn MemoryMapperTrait>>>>,
translate_response_senders: Option<BTreeMap<u32, Tube>>,
translate_request_rx: Option<Tube>,
iommu_device_tube: Tube,
) -> DeviceResult {
let dev = virtio::Iommu::new(
virtio::base_features(cfg.protected_vm),
@@ -1003,6 +1004,7 @@
phys_max_addr,
translate_response_senders,
translate_request_rx,
Some(iommu_device_tube),
)
.context("failed to create IOMMU device")?;


@@ -615,12 +615,15 @@ fn create_devices(
setup_virtio_access_platform(resources, &mut iommu_attached_endpoints, &mut devices)?;
if !iommu_attached_endpoints.is_empty() {
let (_iommu_host_tube, iommu_device_tube) =
Tube::pair().context("failed to create tube")?;
let iommu_dev = create_iommu_device(
cfg,
phys_max_addr,
iommu_attached_endpoints,
translate_response_senders,
request_rx,
iommu_device_tube,
)?;
let (msi_host_tube, msi_device_tube) = Tube::pair().context("failed to create tube")?;


@@ -1190,3 +1190,6 @@ impl Display for VmResponse {
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum VirtioIOMMURequest {}
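VirtioIOMMURequest is deliberately empty at this point: the commit only establishes the channel, and the worker's handler is still a to-do. As a purely hypothetical illustration of where this is headed per the commit message (handing a hot-plugged device's vfio container to the backend), a follow-up might add a variant along these lines; none of these names exist in crosvm in this change.

use serde::{Deserialize, Serialize};

// Hypothetical follow-up shape; the committed enum has no variants yet.
#[derive(Serialize, Deserialize, Debug)]
pub enum VirtioIommuRequestSketch {
    // Attach the vfio container of a hot-plugged endpoint, identified by the
    // same u32 endpoint id used to key the iommu's endpoint map.
    VfioContainerAdd { endpoint_addr: u32 },
}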