devices: virtio: remove virtio MMIO transport

All devices have been converted to PCI, so we don't need MmioDevice.

BUG=chromium:854766
TEST=Boot crosvm on kevin and verify virtio devices still work

Change-Id: Ib6400e15bdb2153d14795de3cb0bfbf1845a8891
Signed-off-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/1281832
Reviewed-by: Dylan Reid <dgreid@chromium.org>
Daniel Verkamp 2018-10-15 14:32:30 -07:00 committed by chrome-bot
parent 5656c124af
commit 402d53bba8
5 changed files with 3 additions and 328 deletions


@@ -36,7 +36,6 @@ use AARCH64_SERIAL_SPEED;
// These are related to guest virtio devices.
use AARCH64_IRQ_BASE;
use AARCH64_MMIO_BASE;
use AARCH64_MMIO_LEN;
use AARCH64_MMIO_SIZE;
use AARCH64_PCI_CFG_BASE;
use AARCH64_PCI_CFG_SIZE;
@@ -368,30 +367,6 @@ fn create_chosen_node(fdt: &mut Vec<u8>, cmdline: &CStr) -> Result<(), Box<Error
Ok(())
}
fn create_io_nodes(fdt: &mut Vec<u8>) -> Result<(), Box<Error>> {
// TODO(sonnyrao) Pass in bus to get number of devices
// HACK -- this is creating a static number of device nodes
// the unused nodes just throw a warning when the guest boots
for i in 0..8 {
let addr = AARCH64_MMIO_BASE + i * AARCH64_MMIO_LEN;
let node = format!("virtio@{:x}", addr);
let reg = generate_prop64(&[addr, AARCH64_MMIO_LEN]);
let irq = generate_prop32(&[
GIC_FDT_IRQ_TYPE_SPI,
AARCH64_IRQ_BASE + i as u32,
IRQ_TYPE_EDGE_RISING,
]);
begin_node(fdt, &node)?;
property_string(fdt, "compatible", "virtio,mmio")?;
property(fdt, "reg", &reg)?;
property_null(fdt, "dma-coherent")?;
property(fdt, "interrupts", &irq)?;
end_node(fdt)?;
}
Ok(())
}
fn create_pci_nodes(
fdt: &mut Vec<u8>,
pci_irqs: Vec<(u32, PciInterruptPin)>,
@@ -539,12 +514,7 @@ pub fn create_fdt(
create_timer_node(&mut fdt, num_cpus)?;
create_serial_node(&mut fdt)?;
create_psci_node(&mut fdt)?;
// TODO(dverkamp): remove create_io_nodes() once the PCI conversion is complete
if !pci_irqs.is_empty() {
create_pci_nodes(&mut fdt, pci_irqs)?;
} else {
create_io_nodes(&mut fdt)?;
}
create_pci_nodes(&mut fdt, pci_irqs)?;
create_rtc_node(&mut fdt)?;
// End giant node
end_node(&mut fdt)?;
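
For context, each of the eight placeholder nodes that the deleted create_io_nodes() emitted corresponds to a device-tree entry roughly like the following (illustrative DTS for the first device at AARCH64_MMIO_BASE; the interrupt cells are shown with the kernel binding's symbolic names rather than the raw values generate_prop32() wrote, and the reg cell split depends on the generated #address-cells/#size-cells):

virtio@1010000 {
    compatible = "virtio,mmio";
    reg = <0x0 0x1010000 0x0 0x1000>;
    dma-coherent;
    interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;
};

With the guest now discovering all virtio devices through the PCI host bridge node, these static placeholders (and the boot warnings from the unused ones) go away.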


@@ -111,8 +111,6 @@ const AARCH64_PCI_CFG_SIZE: u64 = 0x1000000;
const AARCH64_MMIO_BASE: u64 = 0x1010000;
// Size of the whole MMIO region.
const AARCH64_MMIO_SIZE: u64 = 0x100000;
// Each MMIO device gets a 4k page.
const AARCH64_MMIO_LEN: u64 = 0x1000;
// Virtio devices start at SPI interrupt number 2
const AARCH64_IRQ_BASE: u32 = 2;


@@ -12,7 +12,7 @@ extern crate sys_util;
use std::fmt;
use std::fs::File;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::{Arc, Mutex};
@@ -22,7 +22,7 @@ use devices::{
Serial,
};
use io_jail::Minijail;
use kvm::{Datamatch, IoeventAddress, Kvm, Vcpu, Vm};
use kvm::{IoeventAddress, Kvm, Vcpu, Vm};
use resources::SystemAllocator;
use sys_util::{syslog, EventFd, GuestMemory};
@@ -192,64 +192,3 @@ pub fn generate_pci_root(
}
Ok((root, pci_irqs))
}
/// Register a device to be used via MMIO transport.
pub fn register_mmio(
bus: &mut devices::Bus,
vm: &mut Vm,
device: Box<devices::virtio::VirtioDevice>,
jail: Option<Minijail>,
resources: &mut SystemAllocator,
cmdline: &mut kernel_cmdline::Cmdline,
) -> std::result::Result<(), DeviceRegistrationError> {
let irq = match resources.allocate_irq() {
None => return Err(DeviceRegistrationError::IrqsExhausted),
Some(i) => i,
};
// List of FDs to keep open in the child after it forks.
let mut keep_fds: Vec<RawFd> = device.keep_fds();
syslog::push_fds(&mut keep_fds);
let mmio_device = devices::virtio::MmioDevice::new((*vm.get_memory()).clone(), device)
.map_err(DeviceRegistrationError::CreateMmioDevice)?;
let mmio_len = 0x1000; // TODO(dgreid) - configurable per arch?
let mmio_base = resources
.allocate_mmio_addresses(mmio_len)
.ok_or(DeviceRegistrationError::AddrsExhausted)?;
for (i, queue_evt) in mmio_device.queue_evts().iter().enumerate() {
let io_addr = IoeventAddress::Mmio(mmio_base + devices::virtio::NOTIFY_REG_OFFSET as u64);
vm.register_ioevent(&queue_evt, io_addr, Datamatch::U32(Some(i as u32)))
.map_err(DeviceRegistrationError::RegisterIoevent)?;
keep_fds.push(queue_evt.as_raw_fd());
}
if let Some(interrupt_evt) = mmio_device.interrupt_evt() {
vm.register_irqfd(&interrupt_evt, irq)
.map_err(DeviceRegistrationError::RegisterIrqfd)?;
keep_fds.push(interrupt_evt.as_raw_fd());
}
if let Some(jail) = jail {
let proxy_dev = devices::ProxyDevice::new(mmio_device, &jail, keep_fds)
.map_err(DeviceRegistrationError::ProxyDeviceCreation)?;
bus.insert(Arc::new(Mutex::new(proxy_dev)), mmio_base, mmio_len, false)
.unwrap();
} else {
bus.insert(
Arc::new(Mutex::new(mmio_device)),
mmio_base,
mmio_len,
false,
).unwrap();
}
cmdline
.insert(
"virtio_mmio.device",
&format!("4K@0x{:08x}:{}", mmio_base, irq),
).map_err(DeviceRegistrationError::Cmdline)?;
Ok(())
}
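
For reference, the cmdline entry built by the deleted register_mmio() took the form virtio_mmio.device=4K@<base>:<irq>. Assuming the allocator handed out MMIO addresses starting at AARCH64_MMIO_BASE (0x1010000) and IRQs starting at AARCH64_IRQ_BASE (2), the first device registered this way would have appeared on the guest kernel command line roughly as:

virtio_mmio.device=4K@0x01010000:2

PCI devices need no such entry; the guest finds them through normal config-space enumeration.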


@@ -1,230 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use byteorder::{ByteOrder, LittleEndian};
use super::*;
use sys_util::{EventFd, GuestAddress, GuestMemory, Result};
use BusDevice;
const VENDOR_ID: u32 = 0;
const MMIO_MAGIC_VALUE: u32 = 0x74726976;
const MMIO_VERSION: u32 = 2;
/// Implements the
/// [MMIO](http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-1090002)
/// transport for virtio devices.
///
/// This requires 3 points of installation to work with a VM:
///
/// 1. Mmio reads and writes must be sent to this device at what is referred to here as MMIO base.
/// 1. `Mmio::queue_evts` must be installed at `virtio::NOTIFY_REG_OFFSET` offset from the MMIO
/// base. Each event in the array must be signaled if the index is written at that offset.
/// 1. `Mmio::interrupt_evt` must signal an interrupt that the guest driver is listening to when it
/// is written to.
///
/// Typically one page (4096 bytes) of MMIO address space is sufficient to handle this transport
/// and inner virtio device.
pub struct MmioDevice {
device: Box<VirtioDevice>,
device_activated: bool,
features_select: u32,
acked_features_select: u32,
queue_select: u32,
interrupt_status: Arc<AtomicUsize>,
interrupt_evt: Option<EventFd>,
interrupt_resample_evt: Option<EventFd>,
driver_status: u32,
config_generation: u32,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,
mem: Option<GuestMemory>,
}
impl MmioDevice {
/// Constructs a new MMIO transport for the given virtio device.
pub fn new(mem: GuestMemory, device: Box<VirtioDevice>) -> Result<MmioDevice> {
let mut queue_evts = Vec::new();
for _ in device.queue_max_sizes().iter() {
queue_evts.push(EventFd::new()?)
}
let queues = device
.queue_max_sizes()
.iter()
.map(|&s| Queue::new(s))
.collect();
Ok(MmioDevice {
device,
device_activated: false,
features_select: 0,
acked_features_select: 0,
queue_select: 0,
interrupt_status: Arc::new(AtomicUsize::new(0)),
interrupt_evt: Some(EventFd::new()?),
interrupt_resample_evt: Some(EventFd::new()?),
driver_status: 0,
config_generation: 0,
queues,
queue_evts,
mem: Some(mem),
})
}
/// Gets the list of queue events that must be triggered whenever the VM writes to
/// `virtio::NOTIFY_REG_OFFSET` past the MMIO base. Each event must be triggered when the
/// value being written equals the index of the event in this list.
pub fn queue_evts(&self) -> &[EventFd] {
self.queue_evts.as_slice()
}
/// Gets the event this device uses to interrupt the VM when the used queue is changed.
pub fn interrupt_evt(&self) -> Option<&EventFd> {
self.interrupt_evt.as_ref()
}
fn is_driver_ready(&self) -> bool {
let ready_bits = DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK;
self.driver_status == ready_bits && self.driver_status & DEVICE_FAILED == 0
}
fn are_queues_valid(&self) -> bool {
if let Some(mem) = self.mem.as_ref() {
self.queues.iter().all(|q| q.is_valid(mem))
} else {
false
}
}
fn with_queue<U, F>(&self, d: U, f: F) -> U
where
F: FnOnce(&Queue) -> U,
{
match self.queues.get(self.queue_select as usize) {
Some(queue) => f(queue),
None => d,
}
}
fn with_queue_mut<F: FnOnce(&mut Queue)>(&mut self, f: F) -> bool {
if let Some(queue) = self.queues.get_mut(self.queue_select as usize) {
f(queue);
true
} else {
false
}
}
}
impl BusDevice for MmioDevice {
fn read(&mut self, offset: u64, data: &mut [u8]) {
match offset {
0x00...0xff if data.len() == 4 => {
let v = match offset {
0x0 => MMIO_MAGIC_VALUE,
0x04 => MMIO_VERSION,
0x08 => self.device.device_type(),
0x0c => VENDOR_ID, // vendor id
0x10 => {
self.device.features(self.features_select)
| if self.features_select == 1 { 0x1 } else { 0x0 }
}
0x34 => self.with_queue(0, |q| q.max_size as u32),
0x44 => self.with_queue(0, |q| q.ready as u32),
0x60 => self.interrupt_status.load(Ordering::SeqCst) as u32,
0x70 => self.driver_status,
0xfc => self.config_generation,
_ => {
warn!("unknown virtio mmio register read: 0x{:x}", offset);
return;
}
};
LittleEndian::write_u32(data, v);
}
0x100...0xfff => self.device.read_config(offset - 0x100, data),
_ => {
warn!(
"invalid virtio mmio read: 0x{:x}:0x{:x}",
offset,
data.len()
);
}
};
}
fn write(&mut self, offset: u64, data: &[u8]) {
fn hi(v: &mut GuestAddress, x: u32) {
*v = (*v & 0xffffffff) | ((x as u64) << 32)
}
fn lo(v: &mut GuestAddress, x: u32) {
*v = (*v & !0xffffffff) | (x as u64)
}
let mut mut_q = false;
match offset {
0x00...0xff if data.len() == 4 => {
let v = LittleEndian::read_u32(data);
match offset {
0x14 => self.features_select = v,
0x20 => self.device.ack_features(self.acked_features_select, v),
0x24 => self.acked_features_select = v,
0x30 => self.queue_select = v,
0x38 => mut_q = self.with_queue_mut(|q| q.size = v as u16),
0x44 => mut_q = self.with_queue_mut(|q| q.ready = v == 1),
0x64 => {
self.interrupt_status
.fetch_and(!(v as usize), Ordering::SeqCst);
}
0x70 => self.driver_status = v,
0x80 => mut_q = self.with_queue_mut(|q| lo(&mut q.desc_table, v)),
0x84 => mut_q = self.with_queue_mut(|q| hi(&mut q.desc_table, v)),
0x90 => mut_q = self.with_queue_mut(|q| lo(&mut q.avail_ring, v)),
0x94 => mut_q = self.with_queue_mut(|q| hi(&mut q.avail_ring, v)),
0xa0 => mut_q = self.with_queue_mut(|q| lo(&mut q.used_ring, v)),
0xa4 => mut_q = self.with_queue_mut(|q| hi(&mut q.used_ring, v)),
_ => {
warn!("unknown virtio mmio register write: 0x{:x}", offset);
return;
}
}
}
0x100...0xfff => return self.device.write_config(offset - 0x100, data),
_ => {
warn!(
"invalid virtio mmio write: 0x{:x}:0x{:x}",
offset,
data.len()
);
return;
}
}
if self.device_activated && mut_q {
warn!("virtio queue was changed after device was activated");
}
if !self.device_activated && self.is_driver_ready() && self.are_queues_valid() {
if let Some(interrupt_evt) = self.interrupt_evt.take() {
if let Some(interrupt_resample_evt) = self.interrupt_resample_evt.take() {
if let Some(mem) = self.mem.take() {
self.device.activate(
mem,
interrupt_evt,
interrupt_resample_evt,
self.interrupt_status.clone(),
self.queues.clone(),
self.queue_evts.split_off(0),
);
self.device_activated = true;
}
}
}
}
}
}
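
As an aside, the 64-bit queue address registers handled in write() above (0x80/0x84 for the descriptor table, 0x90/0x94 and 0xa0/0xa4 for the avail and used rings) are programmed by the guest as two 32-bit halves. A minimal standalone sketch of that lo/hi split, with a plain u64 standing in for GuestAddress:

// Same split-register pattern as MmioDevice::write(), on a bare u64.
fn lo(v: &mut u64, x: u32) {
    *v = (*v & !0xffff_ffff) | u64::from(x);
}

fn hi(v: &mut u64, x: u32) {
    *v = (*v & 0xffff_ffff) | (u64::from(x) << 32);
}

fn main() {
    let mut desc_table: u64 = 0;
    lo(&mut desc_table, 0x8000_0000); // guest writes QueueDescLow  (offset 0x80)
    hi(&mut desc_table, 0x0000_0001); // guest writes QueueDescHigh (offset 0x84)
    assert_eq!(desc_table, 0x1_8000_0000);
}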


@@ -8,7 +8,6 @@ mod balloon;
mod block;
#[cfg(feature = "gpu")]
mod gpu;
mod mmio;
mod net;
mod p9;
mod queue;
@@ -24,7 +23,6 @@ pub use self::balloon::*;
pub use self::block::*;
#[cfg(feature = "gpu")]
pub use self::gpu::*;
pub use self::mmio::*;
pub use self::net::*;
pub use self::p9::*;
pub use self::queue::*;