mirror of
https://chromium.googlesource.com/crosvm/crosvm
synced 2025-01-12 16:45:31 +00:00
crosvm: add virtio module with queue module
The queue module is for navigating virtio queues using guest memory. It is the basis of the virtio protocol post-configuration. TEST=None BUG=None Change-Id: I2e6f9a1398ee06f3e766fe5edcb1c4283ebe7be8 Reviewed-on: https://chromium-review.googlesource.com/514686 Commit-Ready: Zach Reizner <zachr@chromium.org> Tested-by: Zach Reizner <zachr@chromium.org> Reviewed-by: Zach Reizner <zachr@chromium.org>
This commit is contained in:
parent
c1c23a86e1
commit
f61e803c48
3 changed files with 317 additions and 0 deletions
|
@ -9,6 +9,7 @@ mod serial;
|
|||
// Emulated hardware device modules.
mod i8042;
mod bus;
mod proxy;
// Virtio devices, queues, and transport mechanisms.
pub mod virtio;

// Re-export commonly used device types at the `hw` module root.
pub use self::cmos::Cmos;
pub use self::serial::Serial;
|
||||
|
|
9
src/hw/virtio/mod.rs
Normal file
9
src/hw/virtio/mod.rs
Normal file
|
@ -0,0 +1,9 @@
|
|||
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Implements virtio devices, queues, and transport mechanisms.

mod queue;

// Re-export everything in `queue` (e.g. `Queue`, `DescriptorChain`,
// `AvailIter`) so callers can name them directly under `virtio::`.
pub use self::queue::*;
|
307
src/hw/virtio/queue.rs
Normal file
307
src/hw/virtio/queue.rs
Normal file
|
@ -0,0 +1,307 @@
|
|||
// Copyright 2017 The Chromium OS Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
use std::cmp::min;
use std::result;

use sys_util::{GuestAddress, GuestMemory};

// Descriptor flag bits from the virtio spec's `virtq_desc.flags` field:
// this descriptor continues via the `next` field.
const VIRTQ_DESC_F_NEXT: u16 = 0x1;
// the buffer is device write-only (otherwise device read-only).
const VIRTQ_DESC_F_WRITE: u16 = 0x2;
// the buffer contains a table of indirect descriptors; indirect
// descriptors are not handled by this module yet, hence dead_code.
#[allow(dead_code)]
const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;
|
||||
|
||||
/// Errors that can occur while servicing virtio requests.
///
/// NOTE(review): these variants look device-specific (requests and disk
/// sectors) rather than queue-specific, and nothing in this module
/// constructs them yet — confirm the intended consumers.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
    /// A request referenced memory outside of the guest's address space.
    RequestOutOfBounds,
    /// A requested sector was outside the bounds of the backing storage.
    SectorOutOfBounds,
}

// Implement `Display` + `std::error::Error` so these errors compose with
// standard error handling (`Box<dyn Error>`, `?` conversions, logging).
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let msg = match *self {
            Error::RequestOutOfBounds => "virtio request out of bounds",
            Error::SectorOutOfBounds => "virtio sector out of bounds",
        };
        f.write_str(msg)
    }
}

impl std::error::Error for Error {}

/// Specialized `Result` type for virtio queue operations.
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
/// A virtio descriptor chain.
///
/// One descriptor read out of the guest's descriptor table; the rest of the
/// chain is reached via `next_descriptor`. Public fields mirror the
/// on-guest-memory `virtq_desc` layout.
pub struct DescriptorChain<'a> {
    mem: &'a GuestMemory, // guest memory the descriptor table lives in
    desc_table: GuestAddress, // guest physical base of the descriptor table
    queue_size: u16, // number of entries in the descriptor table
    ttl: u16, // used to prevent infinite chain cycles

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}
|
||||
|
||||
impl<'a> DescriptorChain<'a> {
    /// Reads the descriptor at `index` out of the guest's descriptor table,
    /// returning `None` if the index is out of range, the table entry lies
    /// outside guest memory, or the resulting chain fails `is_valid`.
    fn checked_new(mem: &GuestMemory,
                   desc_table: GuestAddress,
                   queue_size: u16,
                   index: u16)
                   -> Option<DescriptorChain> {
        if index >= queue_size {
            return None;
        }

        // Each descriptor table entry is 16 bytes, laid out as:
        // addr: u64 @ 0, len: u32 @ 8, flags: u16 @ 12, next: u16 @ 14.
        let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
            Some(a) => a,
            None => return None,
        };
        // These reads can't fail unless Guest memory is hopelessly broken.
        // NOTE(review): the u64 read happens before the 16-byte span check
        // below; a descriptor straddling the very end of guest memory could
        // make this unwrap panic — confirm `checked_offset`/read semantics.
        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as usize);
        if mem.checked_offset(desc_head, 16).is_none() {
            return None;
        }
        let len: u32 = mem.read_obj_from_addr(desc_head.unchecked_add(8))
            .unwrap();
        let flags: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(12))
            .unwrap();
        let next: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(14))
            .unwrap();
        // ttl starts at queue_size so that even a cyclic chain terminates
        // after at most queue_size hops (see `has_next`).
        let chain = DescriptorChain {
            mem: mem,
            desc_table: desc_table,
            queue_size: queue_size,
            ttl: queue_size,
            index: index,
            addr: addr,
            len: len,
            flags: flags,
            next: next,
        };

        if chain.is_valid() { Some(chain) } else { None }
    }

    /// A descriptor is valid when its buffer lies entirely within guest
    /// memory and, if it links onward, the `next` index is in range.
    fn is_valid(&self) -> bool {
        if self.mem
               .checked_offset(self.addr, self.len as usize)
               .is_none() {
            false
        } else if self.has_next() && self.next >= self.queue_size {
            false
        } else {
            true
        }
    }

    /// Gets if this descriptor chain has another descriptor chain linked after it.
    pub fn has_next(&self) -> bool {
        // The ttl guard cuts the chain short once queue_size descriptors
        // have been walked, preventing infinite loops on cyclic chains.
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
        if self.has_next() {
            // The successor inherits a decremented ttl so the walk is
            // bounded even if the guest builds a descriptor cycle.
            DescriptorChain::checked_new(self.mem, self.desc_table, self.queue_size, self.next)
                .map(|mut c| {
                         c.ttl = self.ttl - 1;
                         c
                     })
        } else {
            None
        }
    }
}
|
||||
|
||||
/// Consuming iterator over all available descriptor chain heads in the queue.
///
/// Yields one `DescriptorChain` per entry the driver has published in the
/// available ring, from `next_index` up to (but not including) `last_index`.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemory, // guest memory backing the rings
    desc_table: GuestAddress, // guest physical base of the descriptor table
    avail_ring: GuestAddress, // guest physical base of the available ring
    next_index: usize, // ring slot to read next
    last_index: usize, // ring slot one past the final published entry
    queue_size: usize, // entries in the ring; indices wrap at this value
    next_avail: &'b mut u16, // borrowed from the owning Queue; advanced on success
}
|
||||
|
||||
impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain<'a>;

    /// Returns the next available descriptor chain head, or `None` when the
    /// iterator has caught up to the driver's published index or the ring
    /// entry is inaccessible/invalid.
    fn next(&mut self) -> Option<Self::Item> {
        if self.next_index == self.last_index {
            return None;
        }

        // Ring entry i lives at avail_ring + 4 + i * 2: the 4 skips the
        // ring's leading flags (u16) and idx (u16) fields.
        let avail_addr = match self.mem
                  .checked_offset(self.avail_ring, 4 + self.next_index * 2) {
            Some(a) => a,
            None => return None,
        };
        // This index is checked below in checked_new
        let desc_index: u16 = self.mem.read_obj_from_addr(avail_addr).unwrap();

        self.next_index += 1;
        self.next_index %= self.queue_size;

        let ret = DescriptorChain::checked_new(self.mem,
                                               self.desc_table,
                                               self.queue_size as u16,
                                               desc_index);
        // Only commit progress to the queue's persistent cursor when the
        // chain was valid.
        // NOTE(review): on an invalid chain, next_avail is left pointing at
        // the bad entry, so a later `iter()` will retry it — confirm that is
        // the intended recovery behavior.
        if ret.is_some() {
            *self.next_avail += 1;
            *self.next_avail %= self.queue_size as u16;
        }
        ret
    }
}
|
||||
|
||||
#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    // Next slot of the available ring to service (wraps at actual_size).
    next_avail: u16,
    // Free-running count of used-ring entries written (wraps at u16::MAX).
    next_used: u16,
}
|
||||
|
||||
impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    ///
    /// All addresses start at zero and `ready` is false; the driver is
    /// expected to configure `size`, the ring addresses, and `ready`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size: max_size,
            size: 0,
            ready: false,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: 0,
            next_used: 0,
        }
    }

    // The effective queue size: the driver's choice clamped to the
    // device-offered maximum.
    fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    /// Checks that the queue is ready, the size is a non-zero power of two
    /// no larger than `max_size`, and all three rings fit in guest memory.
    fn is_valid(&self, mem: &GuestMemory) -> bool {
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        // 16 bytes per descriptor table entry.
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        // flags(2) + idx(2) + ring entries (2 each) + trailing event idx (2).
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        // flags(2) + idx(2) + used elems (8 each) + trailing event idx (2).
        let used_ring_size = 6 + 8 * queue_size;
        // NOTE(review): these diagnostics go to stdout via println!; an
        // error/log macro (or stderr) would likely be more appropriate.
        if !self.ready {
            println!("error: attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 ||
                  // rejects sizes that are not a power of two
                  (self.size & (self.size - 1)) != 0 {
            println!("error: virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
                      .checked_add(desc_table_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     desc_table.offset(),
                     desc_table_size);
            false
        } else if avail_ring
                      .checked_add(avail_ring_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     avail_ring.offset(),
                     avail_ring_size);
            false
        } else if used_ring
                      .checked_add(used_ring_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     used_ring.offset(),
                     used_ring_size);
            false
        } else {
            true
        }

    }

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemory) -> AvailIter<'a, 'b> {
        // An invalid queue yields an immediately-empty iterator rather than
        // failing, so callers need no special-casing.
        if !self.is_valid(mem) {
            return AvailIter {
                       mem: mem,
                       desc_table: GuestAddress(0),
                       avail_ring: GuestAddress(0),
                       next_index: 0,
                       last_index: 0,
                       queue_size: 0,
                       next_avail: &mut self.next_avail,
                   };
        }
        let queue_size = self.actual_size();
        let avail_ring = self.avail_ring;

        // The driver's published index lives at avail_ring + 2 (after the
        // u16 flags field); is_valid already proved this offset is mapped.
        let index_addr = mem.checked_offset(avail_ring, 2).unwrap();
        // Note that last_index has no invalid values: the free-running idx
        // is reduced modulo queue_size into ring-slot space here.
        let last_index: u16 = mem.read_obj_from_addr::<u16>(index_addr).unwrap() % queue_size;
        AvailIter {
            mem: mem,
            desc_table: self.desc_table,
            avail_ring: avail_ring,
            next_index: self.next_avail as usize,
            last_index: last_index as usize,
            queue_size: queue_size as usize,
            next_avail: &mut self.next_avail,
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemory, desc_index: u16, len: u32) {
        if desc_index >= self.actual_size() {
            println!("error: attempted to add out of bounds descriptor to used ring: {}",
                     desc_index);
            return;
        }

        let used_ring = self.used_ring;
        // Used elem slot wraps at queue size; each elem is 8 bytes
        // (id: u32, len: u32) and the ring header (flags + idx) is 4 bytes.
        let next_used = (self.next_used % self.actual_size()) as usize;
        let used_elem = used_ring.unchecked_add(4 + next_used * 8);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        mem.write_obj_at_addr(desc_index as u32, used_elem)
            .unwrap();
        mem.write_obj_at_addr(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        // Publish the new free-running used index at used_ring + 2; it wraps
        // at u16::MAX (not queue size), matching the driver's expectation.
        self.next_used = self.next_used.wrapping_add(1);
        mem.write_obj_at_addr(self.next_used as u16, used_ring.unchecked_add(2))
            .unwrap();
    }
}
|
Loading…
Reference in a new issue