devices: virtio: disallow modification of ready queues

Disallow modification of a queue's configuration after it has been
enabled, since the spec states drivers MUST configure the other
virtqueue fields before enabling the virtqueue. This allows the
queue validation to be done once and then saved, instead of requiring
validation for every peek.

Before this change, modifications of running virtqueues were not well
defined, since the Queue instance owned by the VirtioPciDevice is not
the same as the Queue instance owned by the VirtioDevice implementation.

BUG=None
TEST=boot ARCVM and crostini on ManaTEE and non-ManaTEE

Change-Id: Ibd1f2bdb0a49865cedd8a0424199a72316696b4d
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3737409
Commit-Queue: David Stevens <stevensd@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
This commit is contained in:
David Stevens 2022-06-30 10:30:14 +09:00 committed by Chromeos LUCI
parent 540872395e
commit 9df3eb7ef2
10 changed files with 132 additions and 84 deletions

View file

@ -71,8 +71,8 @@ fuzz_target!(|bytes| {
}
let mut q = Queue::new(QUEUE_SIZE);
q.ready = true;
q.size = QUEUE_SIZE / 2;
q.set_ready(true);
q.set_size(QUEUE_SIZE / 2);
q.max_size = QUEUE_SIZE;
let queue_evts: Vec<Event> = vec![Event::new().unwrap()];

View file

@ -54,15 +54,19 @@ struct virtq_used {
fuzz_target!(|data: &[u8]| {
let mut q = Queue::new(MAX_QUEUE_SIZE);
let mut rng = FuzzRng::new(data);
q.size = rng.gen();
q.ready = true;
q.set_size(rng.gen());
q.set_ready(true);
// For each of {desc_table,avail_ring,used_ring} generate a random address that includes enough
// space to hold the relevant struct with the largest possible queue size.
let max_table_size = MAX_QUEUE_SIZE as u64 * size_of::<virtq_desc>() as u64;
q.desc_table = GuestAddress(rng.gen_range(0..MEM_SIZE - max_table_size));
q.avail_ring = GuestAddress(rng.gen_range(0..MEM_SIZE - size_of::<virtq_avail>() as u64));
q.used_ring = GuestAddress(rng.gen_range(0..MEM_SIZE - size_of::<virtq_used>() as u64));
q.set_desc_table(GuestAddress(rng.gen_range(0..MEM_SIZE - max_table_size)));
q.set_avail_ring(GuestAddress(
rng.gen_range(0..MEM_SIZE - size_of::<virtq_avail>() as u64),
));
q.set_used_ring(GuestAddress(
rng.gen_range(0..MEM_SIZE - size_of::<virtq_used>() as u64),
));
GUEST_MEM.with(|mem| {
if !q.is_valid(mem) {
@ -76,25 +80,25 @@ fuzz_target!(|data: &[u8]| {
vs.write_bytes(0);
// Fill in the descriptor table.
let queue_size = q.size as usize;
let queue_size = q.size() as usize;
let mut buf = vec![0u8; queue_size * size_of::<virtq_desc>()];
rng.fill_bytes(&mut buf[..]);
mem.write_all_at_addr(&buf[..], q.desc_table).unwrap();
mem.write_all_at_addr(&buf[..], q.desc_table()).unwrap();
// Fill in the available ring. See the definition of virtq_avail above for the source of
// these numbers.
let avail_size = 4 + (queue_size * 2) + 2;
buf.resize(avail_size, 0);
rng.fill_bytes(&mut buf[..]);
mem.write_all_at_addr(&buf[..], q.avail_ring).unwrap();
mem.write_all_at_addr(&buf[..], q.avail_ring()).unwrap();
// Fill in the used ring. See the definition of virtq_used above for the source of
// these numbers.
let used_size = 4 + (queue_size * size_of::<virtq_used_elem>()) + 2;
buf.resize(used_size, 0);
rng.fill_bytes(&mut buf[..]);
mem.write_all_at_addr(&buf[..], q.used_ring).unwrap();
mem.write_all_at_addr(&buf[..], q.used_ring()).unwrap();
while let Some(avail_desc) = q.pop(mem) {
let idx = avail_desc.index;

View file

@ -10,7 +10,7 @@ use std::sync::Arc;
use sync::Mutex;
use anyhow::{bail, Context};
use base::error;
use base::{error, warn};
use cros_async::{AsyncError, EventAsync};
use data_model::{DataInit, Le16, Le32, Le64};
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
@ -259,22 +259,25 @@ pub struct Queue {
pub max_size: u16,
/// The queue size in elements the driver selected
pub size: u16,
size: u16,
/// Indicates if the queue is finished with configuration
pub ready: bool,
ready: bool,
/// Indicates that a ready queue's configuration has been validated successfully.
validated: bool,
/// MSI-X vector for the queue. Don't care for INTx
pub vector: u16,
vector: u16,
/// Guest physical address of the descriptor table
pub desc_table: GuestAddress,
desc_table: GuestAddress,
/// Guest physical address of the available ring
pub avail_ring: GuestAddress,
avail_ring: GuestAddress,
/// Guest physical address of the used ring
pub used_ring: GuestAddress,
used_ring: GuestAddress,
pub next_avail: Wrapping<u16>,
pub next_used: Wrapping<u16>,
@ -291,6 +294,22 @@ pub struct Queue {
iommu: Option<Arc<Mutex<IpcMemoryMapper>>>,
}
// Generates a getter and a write-guarded setter for a private `Queue`
// field. The setter silently ignores (with a warning) any write made
// after the queue has been marked ready, enforcing the virtio
// requirement that drivers finish configuring a virtqueue before
// enabling it — which in turn lets validation results be cached.
macro_rules! accessors {
($var:ident, $t:ty, $setter:ident) => {
// Getter: returns the field by value (all wrapped fields are Copy).
pub fn $var(&self) -> $t {
self.$var
}
// Setter: a no-op (plus warning) once `self.ready` is true.
pub fn $setter(&mut self, val: $t) {
if self.ready {
warn!("ignoring write to {} on ready queue", stringify!($var));
return;
}
self.$var = val;
}
};
}
impl Queue {
/// Constructs an empty virtio queue with the given `max_size`.
pub fn new(max_size: u16) -> Queue {
@ -298,6 +317,7 @@ impl Queue {
max_size,
size: max_size,
ready: false,
validated: false,
vector: VIRTIO_MSI_NO_VECTOR,
desc_table: GuestAddress(0),
avail_ring: GuestAddress(0),
@ -311,6 +331,13 @@ impl Queue {
}
}
accessors!(vector, u16, set_vector);
accessors!(size, u16, set_size);
accessors!(ready, bool, set_ready);
accessors!(desc_table, GuestAddress, set_desc_table);
accessors!(avail_ring, GuestAddress, set_avail_ring);
accessors!(used_ring, GuestAddress, set_used_ring);
/// Return the actual size of the queue, as the driver may not set up a
/// queue as big as the device allows.
pub fn actual_size(&self) -> u16 {
@ -320,6 +347,7 @@ impl Queue {
/// Reset queue to a clean state
pub fn reset(&mut self) {
self.ready = false;
self.validated = false;
self.size = self.max_size;
self.vector = VIRTIO_MSI_NO_VECTOR;
self.desc_table = GuestAddress(0);
@ -340,7 +368,19 @@ impl Queue {
self.last_used = Wrapping(0);
}
pub fn is_valid(&self, mem: &GuestMemory) -> bool {
pub fn is_valid(&mut self, mem: &GuestMemory) -> bool {
if !self.ready {
error!("attempt to use virtio queue that is not marked ready");
return false;
}
if !self.validated {
self.validate(mem);
}
self.validated
}
fn validate(&mut self, mem: &GuestMemory) {
let queue_size = self.actual_size() as usize;
let desc_table = self.desc_table;
let desc_table_size = 16 * queue_size;
@ -348,13 +388,9 @@ impl Queue {
let avail_ring_size = 6 + 2 * queue_size;
let used_ring = self.used_ring;
let used_ring_size = 6 + 8 * queue_size;
if !self.ready {
error!("attempt to use virtio queue that is not marked ready");
return false;
} else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
{
if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0 {
error!("virtio queue with invalid size: {}", self.size);
return false;
return;
}
let iommu = self.iommu.as_ref().map(|i| i.lock());
@ -372,16 +408,16 @@ impl Queue {
addr.offset(),
size,
);
return false;
return;
}
}
Err(e) => {
error!("is_valid failed: {:#}", e);
return false;
return;
}
}
}
true
self.validated = true;
}
// Get the index of the first available descriptor chain in the available ring

View file

@ -419,7 +419,7 @@ impl VhostUserSlaveReqHandlerMut for DeviceRequestHandler {
{
return Err(VhostError::InvalidParam);
}
self.vrings[index as usize].queue.size = num as u16;
self.vrings[index as usize].queue.set_size(num as u16);
Ok(())
}
@ -439,9 +439,13 @@ impl VhostUserSlaveReqHandlerMut for DeviceRequestHandler {
let vmm_maps = self.vmm_maps.as_ref().ok_or(VhostError::InvalidParam)?;
let vring = &mut self.vrings[index as usize];
vring.queue.desc_table = vmm_va_to_gpa(vmm_maps, descriptor)?;
vring.queue.avail_ring = vmm_va_to_gpa(vmm_maps, available)?;
vring.queue.used_ring = vmm_va_to_gpa(vmm_maps, used)?;
vring
.queue
.set_desc_table(vmm_va_to_gpa(vmm_maps, descriptor)?);
vring
.queue
.set_avail_ring(vmm_va_to_gpa(vmm_maps, available)?);
vring.queue.set_used_ring(vmm_va_to_gpa(vmm_maps, used)?);
Ok(())
}
@ -485,7 +489,7 @@ impl VhostUserSlaveReqHandlerMut for DeviceRequestHandler {
}
let vring = &mut self.vrings[index as usize];
if vring.queue.ready {
if vring.queue.ready() {
error!("kick fd cannot replaced after queue is started");
return Err(VhostError::InvalidOperation);
}
@ -505,7 +509,7 @@ impl VhostUserSlaveReqHandlerMut for DeviceRequestHandler {
};
let vring = &mut self.vrings[index as usize];
vring.queue.ready = true;
vring.queue.set_ready(true);
let queue = vring.queue.clone();
let doorbell = vring

View file

@ -206,7 +206,7 @@ impl VhostUserSlaveReqHandlerMut for VsockBackend {
// We checked these values already.
let index = index as usize;
let num = num as u16;
self.queues[index].size = num;
self.queues[index].set_size(num);
// The last vq is an event-only vq that is not handled by the kernel.
if index == EVENT_QUEUE {
@ -236,10 +236,10 @@ impl VhostUserSlaveReqHandlerMut for VsockBackend {
let mem = self.mem.as_ref().ok_or(Error::InvalidParam)?;
let maps = self.vmm_maps.as_ref().ok_or(Error::InvalidParam)?;
let mut queue = &mut self.queues[index];
queue.desc_table = vmm_va_to_gpa(maps, descriptor)?;
queue.avail_ring = vmm_va_to_gpa(maps, available)?;
queue.used_ring = vmm_va_to_gpa(maps, used)?;
let queue = &mut self.queues[index];
queue.set_desc_table(vmm_va_to_gpa(maps, descriptor)?);
queue.set_avail_ring(vmm_va_to_gpa(maps, available)?);
queue.set_used_ring(vmm_va_to_gpa(maps, used)?);
let log_addr = if flags.contains(VhostUserVringAddrFlags::VHOST_VRING_F_LOG) {
vmm_va_to_gpa(maps, log).map(Some)?
} else {
@ -257,9 +257,9 @@ impl VhostUserSlaveReqHandlerMut for VsockBackend {
queue.actual_size(),
index,
flags.bits(),
queue.desc_table,
queue.used_ring,
queue.avail_ring,
queue.desc_table(),
queue.used_ring(),
queue.avail_ring(),
log_addr,
)
.map_err(convert_vhost_error)
@ -317,7 +317,7 @@ impl VhostUserSlaveReqHandlerMut for VsockBackend {
return Err(Error::InvalidParam);
}
let queue = &mut self.queues[index];
if queue.ready {
if queue.ready() {
error!("kick fd cannot replaced after queue is started");
return Err(Error::InvalidOperation);
}
@ -432,13 +432,13 @@ impl VhostUserSlaveReqHandlerMut for VsockBackend {
return Err(Error::InvalidParam);
}
self.queues[index as usize].ready = enable;
self.queues[index as usize].set_ready(enable);
if index == (EVENT_QUEUE) as u32 {
return Ok(());
}
if self.queues[..EVENT_QUEUE].iter().all(|q| q.ready) {
if self.queues[..EVENT_QUEUE].iter().all(|q| q.ready()) {
// All queues are ready. Start the device.
self.handle.set_cid(self.cid).map_err(convert_vhost_error)?;
self.handle.start().map_err(convert_vhost_error)

View file

@ -452,10 +452,10 @@ mod test {
}
fn setup_vq(queue: &mut DeviceQueue, addrs: DescTableAddrs) {
queue.desc_table = IOVA(addrs.desc);
queue.avail_ring = IOVA(addrs.avail);
queue.used_ring = IOVA(addrs.used);
queue.ready = true;
queue.set_desc_table(IOVA(addrs.desc));
queue.set_avail_ring(IOVA(addrs.avail));
queue.set_used_ring(IOVA(addrs.used));
queue.set_ready(true);
}
fn device_write(mem: &QueueMemory, q: &mut DeviceQueue, data: &[u8]) -> usize {

View file

@ -180,13 +180,13 @@ impl VhostUserHandler {
queue_size: queue.actual_size(),
flags: 0u32,
desc_table_addr: mem
.get_host_address(queue.desc_table)
.get_host_address(queue.desc_table())
.map_err(Error::GetHostAddress)? as u64,
used_ring_addr: mem
.get_host_address(queue.used_ring)
.get_host_address(queue.used_ring())
.map_err(Error::GetHostAddress)? as u64,
avail_ring_addr: mem
.get_host_address(queue.avail_ring)
.get_host_address(queue.avail_ring())
.map_err(Error::GetHostAddress)? as u64,
log_addr: None,
};
@ -231,7 +231,7 @@ impl VhostUserHandler {
for (queue_index, queue) in queues.iter().enumerate() {
let queue_evt = &queue_evts[queue_index];
let irqfd = msix_config
.get_irqfd(queue.vector as usize)
.get_irqfd(queue.vector() as usize)
.unwrap_or_else(|| interrupt.get_interrupt_evt());
self.activate_vring(&mem, queue_index, queue, queue_evt, irqfd)?;
}

View file

@ -81,16 +81,16 @@ impl<T: Vhost> Worker<T> {
queue.actual_size(),
queue_index,
0,
queue.desc_table,
queue.used_ring,
queue.avail_ring,
queue.desc_table(),
queue.used_ring(),
queue.avail_ring(),
None,
)
.map_err(Error::VhostSetVringAddr)?;
self.vhost_handle
.set_vring_base(queue_index, 0)
.map_err(Error::VhostSetVringBase)?;
self.set_vring_call_for_entry(queue_index, queue.vector as usize)?;
self.set_vring_call_for_entry(queue_index, queue.vector() as usize)?;
self.vhost_handle
.set_vring_kick(queue_index, &queue_evts[queue_index])
.map_err(Error::VhostSetVringKick)?;
@ -141,7 +141,8 @@ impl<T: Vhost> Worker<T> {
self.vhost_interrupt[index]
.read()
.map_err(Error::VhostIrqRead)?;
self.interrupt.signal_used_queue(self.queues[index].vector);
self.interrupt
.signal_used_queue(self.queues[index].vector());
}
Token::InterruptResample => {
self.interrupt.interrupt_resample();
@ -156,7 +157,7 @@ impl<T: Vhost> Worker<T> {
Ok(VhostDevRequest::MsixEntryChanged(index)) => {
let mut qindex = 0;
for (queue_index, queue) in self.queues.iter().enumerate() {
if queue.vector == index as u16 {
if queue.vector() == index as u16 {
qindex = queue_index;
break;
}
@ -247,7 +248,7 @@ impl<T: Vhost> Worker<T> {
}
} else {
for (queue_index, queue) in self.queues.iter().enumerate() {
let vector = queue.vector as usize;
let vector = queue.vector() as usize;
if !msix_config.table_masked(vector) {
if let Some(irqfd) = msix_config.get_irqfd(vector) {
self.vhost_handle

View file

@ -122,10 +122,10 @@ impl VirtioPciCommonConfig {
0x10 => self.msix_config,
0x12 => queues.len() as u16, // num_queues
0x16 => self.queue_select,
0x18 => self.with_queue(queues, |q| q.size).unwrap_or(0),
0x1a => self.with_queue(queues, |q| q.vector).unwrap_or(0),
0x18 => self.with_queue(queues, |q| q.size()).unwrap_or(0),
0x1a => self.with_queue(queues, |q| q.vector()).unwrap_or(0),
0x1c => {
if self.with_queue(queues, |q| q.ready).unwrap_or(false) {
if self.with_queue(queues, |q| q.ready()).unwrap_or(false) {
1
} else {
0
@ -140,9 +140,9 @@ impl VirtioPciCommonConfig {
match offset {
0x10 => self.msix_config = value,
0x16 => self.queue_select = value,
0x18 => self.with_queue_mut(queues, |q| q.size = value),
0x1a => self.with_queue_mut(queues, |q| q.vector = value),
0x1c => self.with_queue_mut(queues, |q| q.ready = value == 1),
0x18 => self.with_queue_mut(queues, |q| q.set_size(value)),
0x1a => self.with_queue_mut(queues, |q| q.set_vector(value)),
0x1c => self.with_queue_mut(queues, |q| q.set_ready(value == 1)),
_ => {
warn!("invalid virtio register word write: 0x{:x}", offset);
}
@ -173,12 +173,15 @@ impl VirtioPciCommonConfig {
queues: &mut [Queue],
device: &mut dyn VirtioDevice,
) {
fn hi(v: &mut GuestAddress, x: u32) {
*v = (*v & 0xffffffff) | ((x as u64) << 32)
macro_rules! hi {
($q:expr, $get:ident, $set:ident, $x:expr) => {
$q.$set(($q.$get() & 0xffffffff) | (($x as u64) << 32))
};
}
fn lo(v: &mut GuestAddress, x: u32) {
*v = (*v & !0xffffffff) | (x as u64)
macro_rules! lo {
($q:expr, $get:ident, $set:ident, $x:expr) => {
$q.$set(($q.$get() & !0xffffffff) | ($x as u64))
};
}
match offset {
@ -198,12 +201,12 @@ impl VirtioPciCommonConfig {
);
}
}
0x20 => self.with_queue_mut(queues, |q| lo(&mut q.desc_table, value)),
0x24 => self.with_queue_mut(queues, |q| hi(&mut q.desc_table, value)),
0x28 => self.with_queue_mut(queues, |q| lo(&mut q.avail_ring, value)),
0x2c => self.with_queue_mut(queues, |q| hi(&mut q.avail_ring, value)),
0x30 => self.with_queue_mut(queues, |q| lo(&mut q.used_ring, value)),
0x34 => self.with_queue_mut(queues, |q| hi(&mut q.used_ring, value)),
0x20 => self.with_queue_mut(queues, |q| lo!(q, desc_table, set_desc_table, value)),
0x24 => self.with_queue_mut(queues, |q| hi!(q, desc_table, set_desc_table, value)),
0x28 => self.with_queue_mut(queues, |q| lo!(q, avail_ring, set_avail_ring, value)),
0x2c => self.with_queue_mut(queues, |q| hi!(q, avail_ring, set_avail_ring, value)),
0x30 => self.with_queue_mut(queues, |q| lo!(q, used_ring, set_used_ring, value)),
0x34 => self.with_queue_mut(queues, |q| hi!(q, used_ring, set_used_ring, value)),
_ => {
warn!("invalid virtio register dword write: 0x{:x}", offset);
}
@ -216,9 +219,9 @@ impl VirtioPciCommonConfig {
fn write_common_config_qword(&mut self, offset: u64, value: u64, queues: &mut [Queue]) {
match offset {
0x20 => self.with_queue_mut(queues, |q| q.desc_table = GuestAddress(value)),
0x28 => self.with_queue_mut(queues, |q| q.avail_ring = GuestAddress(value)),
0x30 => self.with_queue_mut(queues, |q| q.used_ring = GuestAddress(value)),
0x20 => self.with_queue_mut(queues, |q| q.set_desc_table(GuestAddress(value))),
0x28 => self.with_queue_mut(queues, |q| q.set_avail_ring(GuestAddress(value))),
0x30 => self.with_queue_mut(queues, |q| q.set_used_ring(GuestAddress(value))),
_ => {
warn!("invalid virtio register qword write: 0x{:x}", offset);
}

View file

@ -351,11 +351,11 @@ impl VirtioPciDevice {
self.common_config.driver_status == DEVICE_RESET as u8
}
fn are_queues_valid(&self) -> bool {
fn are_queues_valid(&mut self) -> bool {
// All queues marked as ready must be valid.
self.queues
.iter()
.filter(|q| q.ready)
.iter_mut()
.filter(|q| q.ready())
.all(|q| q.is_valid(&self.mem))
}
@ -459,7 +459,7 @@ impl VirtioPciDevice {
.clone()
.into_iter()
.zip(queue_evts.into_iter())
.filter(|(q, _)| q.ready)
.filter(|(q, _)| q.ready())
.unzip();
self.device.activate(mem, interrupt, queues, queue_evts);