From aee33b5862f329066a82ef58cd6005f11bd3f5ab Mon Sep 17 00:00:00 2001
From: Xiong Zhang
Date: Tue, 25 Feb 2020 14:41:28 +0800
Subject: [PATCH] virtio: Add VIRTIO_RING_F_EVENT_IDX test case

The VIRTIO_RING_F_EVENT_IDX path uses Wrapping(u16) arithmetic for
interrupt suppression; these test cases cover the corner cases around
index wrap-around.

BUG=None
TEST=Run build_test.py

Change-Id: I47d377056fefcc36739bb197e30319deafb0faf4
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2073902
Reviewed-by: Daniel Verkamp
Tested-by: kokoro
Commit-Queue: Xiong Zhang
---
 devices/src/virtio/queue.rs | 291 +++++++++++++++++++++++++++++++++++-
 1 file changed, 290 insertions(+), 1 deletion(-)

diff --git a/devices/src/virtio/queue.rs b/devices/src/virtio/queue.rs
index 18ddd6d0f7..2613d2f57c 100644
--- a/devices/src/virtio/queue.rs
+++ b/devices/src/virtio/queue.rs
@@ -450,10 +450,15 @@ impl Queue {
     }
 
     /// inject interrupt into guest on this queue
-    pub fn trigger_interrupt(&mut self, mem: &GuestMemory, interrupt: &Interrupt) {
+    /// Returns true if an interrupt was injected into the guest for this
+    /// queue, or false if no interrupt was needed.
+    pub fn trigger_interrupt(&mut self, mem: &GuestMemory, interrupt: &Interrupt) -> bool {
         if self.available_interrupt_enabled(mem) {
             self.last_used = self.next_used;
             interrupt.signal_used_queue(self.vector);
+            true
+        } else {
+            false
         }
     }
 
@@ -462,3 +467,287 @@ impl Queue {
         self.features |= features;
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use data_model::{DataInit, Le16, Le32, Le64};
+    use std::convert::TryInto;
+    use std::sync::atomic::AtomicUsize;
+    use std::sync::Arc;
+    use sys_util::EventFd;
+
+    const GUEST_MEMORY_SIZE: u64 = 0x10000;
+    const DESC_OFFSET: u64 = 0;
+    const AVAIL_OFFSET: u64 = 0x200;
+    const USED_OFFSET: u64 = 0x400;
+    const QUEUE_SIZE: usize = 0x10;
+    const BUFFER_OFFSET: u64 = 0x8000;
+    const BUFFER_LEN: u32 = 0x400;
+
+    #[derive(Copy, Clone, Debug)]
+    #[repr(C)]
+    struct Desc {
+        addr: Le64,
+        len: Le32,
+        flags: Le16,
+        next: Le16,
+    }
+    // Safe as this only runs in test
+    unsafe impl DataInit for Desc {}
+
+    #[derive(Copy, Clone, Debug)]
+    #[repr(C)]
+    struct Avail {
+        flags: Le16,
+        idx: Le16,
+        ring: [Le16; QUEUE_SIZE],
+        used_event: Le16,
+    }
+    // Safe as this only runs in test
+    unsafe impl DataInit for Avail {}
+    impl Default for Avail {
+        fn default() -> Self {
+            Avail {
+                flags: Le16::from(0u16),
+                idx: Le16::from(0u16),
+                ring: [Le16::from(0u16); QUEUE_SIZE],
+                used_event: Le16::from(0u16),
+            }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug)]
+    #[repr(C)]
+    struct UsedElem {
+        id: Le32,
+        len: Le32,
+    }
+    // Safe as this only runs in test
+    unsafe impl DataInit for UsedElem {}
+    impl Default for UsedElem {
+        fn default() -> Self {
+            UsedElem {
+                id: Le32::from(0u32),
+                len: Le32::from(0u32),
+            }
+        }
+    }
+
+    #[derive(Copy, Clone, Debug)]
+    #[repr(C)]
+    struct Used {
+        flags: Le16,
+        idx: Le16,
+        used_elem_ring: [UsedElem; QUEUE_SIZE],
+        avail_event: Le16,
+    }
+    // Safe as this only runs in test
+    unsafe impl DataInit for Used {}
+    impl Default for Used {
+        fn default() -> Self {
+            Used {
+                flags: Le16::from(0u16),
+                idx: Le16::from(0u16),
+                used_elem_ring: [UsedElem::default(); QUEUE_SIZE],
+                avail_event: Le16::from(0u16),
+            }
+        }
+    }
+
+    fn setup_vq(queue: &mut Queue, mem: &GuestMemory) {
+        let desc = Desc {
+            addr: Le64::from(BUFFER_OFFSET),
+            len: Le32::from(BUFFER_LEN),
+            flags: Le16::from(0u16),
+            next: Le16::from(1u16),
+        };
+        let _ = mem.write_obj_at_addr(desc, GuestAddress(DESC_OFFSET));
+
+        let avail = Avail::default();
+        let _ = mem.write_obj_at_addr(avail, GuestAddress(AVAIL_OFFSET));
+
+        let used = Used::default();
+        let _ = mem.write_obj_at_addr(used, GuestAddress(USED_OFFSET));
+
+        queue.desc_table = GuestAddress(DESC_OFFSET);
+        queue.avail_ring = GuestAddress(AVAIL_OFFSET);
+        queue.used_ring = GuestAddress(USED_OFFSET);
+        queue.ack_features((1u64) << VIRTIO_RING_F_EVENT_IDX);
+    }
+
+    #[test]
+    fn queue_event_id_guest_fast() {
+        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
+        let memory_start_addr = GuestAddress(0x0);
+        let mem = GuestMemory::new(&vec![(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
+        setup_vq(&mut queue, &mem);
+
+        let interrupt = Interrupt::new(
+            Arc::new(AtomicUsize::new(0)),
+            EventFd::new().unwrap(),
+            EventFd::new().unwrap(),
+            None,
+            10,
+        );
+
+        // Calculate the offset of used_event within the Avail structure.
+        let used_event_offset: u64 =
+            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
+        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);
+
+        // Assume the driver submits 0x100 requests and the device handles them
+        // all, advancing self.next_used to 0x100.
+        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
+        for _ in 0..device_generate.0 {
+            queue.add_used(&mem, 0x0, BUFFER_LEN);
+        }
+
+        // The driver hasn't acknowledged any used buffers yet, so an interrupt
+        // should be injected.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+
+        // The driver processes all the used buffers and updates
+        // avail.used_event to 0x100.
+        let mut driver_handled = device_generate;
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // The driver has caught up and the device hasn't generated more data,
+        // so no interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // Assume the driver submits another u16::MAX - 0x100 requests and the
+        // device handles all of them, advancing self.next_used to u16::MAX.
+        for _ in device_generate.0..u16::max_value() {
+            queue.add_used(&mem, 0x0, BUFFER_LEN);
+        }
+        device_generate = Wrapping(u16::max_value());
+
+        // The driver has only acknowledged the first 0x100 used buffers, so an
+        // interrupt should be injected.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+
+        // The driver processes all the used buffers and updates
+        // avail.used_event to u16::MAX.
+        driver_handled = device_generate;
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // The driver has caught up and the device hasn't generated more data,
+        // so no interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // Assume the driver submits one more request and the device handles
+        // it, wrapping self.next_used around to 0.
+        queue.add_used(&mem, 0x0, BUFFER_LEN);
+        device_generate += Wrapping(1);
+
+        // The driver has acknowledged all previous used buffers, so an
+        // interrupt should be injected again.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+
+        // The driver processes that buffer and updates avail.used_event to 0.
+        driver_handled = device_generate;
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // The driver has caught up and the device hasn't generated more data,
+        // so no interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+    }
+
+    #[test]
+    fn queue_event_id_guest_slow() {
+        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
+        let memory_start_addr = GuestAddress(0x0);
+        let mem = GuestMemory::new(&vec![(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
+        setup_vq(&mut queue, &mem);
+
+        let interrupt = Interrupt::new(
+            Arc::new(AtomicUsize::new(0)),
+            EventFd::new().unwrap(),
+            EventFd::new().unwrap(),
+            None,
+            10,
+        );
+
+        // Calculate the offset of used_event within the Avail structure.
+        let used_event_offset: u64 =
+            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
+        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);
+
+        // Assume the driver submits 0x100 requests and the device handles them
+        // all, advancing self.next_used to 0x100.
+        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
+        for _ in 0..device_generate.0 {
+            queue.add_used(&mem, 0x0, BUFFER_LEN);
+        }
+
+        // The driver hasn't acknowledged any used buffers yet, so an interrupt
+        // should be injected.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+
+        // The driver processes only part of the used buffers and updates
+        // avail.used_event to 0x80.
+        let mut driver_handled = Wrapping(0x80);
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // The driver hasn't caught up to the last notification yet, so no
+        // interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // Assume the driver submits one more request and the device handles
+        // it, incrementing self.next_used.
+        queue.add_used(&mem, 0x0, BUFFER_LEN);
+        device_generate += Wrapping(1);
+
+        // The driver still hasn't caught up to the last notification, so no
+        // interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // Assume the driver submits another u16::MAX - 0x101 requests and the
+        // device handles all of them, advancing self.next_used to u16::MAX.
+        for _ in device_generate.0..u16::max_value() {
+            queue.add_used(&mem, 0x0, BUFFER_LEN);
+        }
+        device_generate = Wrapping(u16::max_value());
+
+        // The driver still hasn't caught up to the last notification, so no
+        // interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // The driver processes most of the used buffers and updates
+        // avail.used_event to u16::MAX - 1.
+        driver_handled = device_generate - Wrapping(1);
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // Assume the driver submits one more request and the device handles
+        // it, wrapping self.next_used around to 0.
+        queue.add_used(&mem, 0x0, BUFFER_LEN);
+        device_generate += Wrapping(1);
+
+        // The driver has moved used_event past the last notification point
+        // (0x100) and the device has produced new data since then, so an
+        // interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+
+        // Assume the driver submits one more request and the device handles
+        // it, incrementing self.next_used to 1.
+        queue.add_used(&mem, 0x0, BUFFER_LEN);
+        device_generate += Wrapping(1);
+
+        // The driver hasn't caught up to the last notification (Wrapping(0))
+        // yet, so no interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // The driver processes all the remaining used buffers and wraps
+        // avail.used_event around to 0x1.
+        driver_handled = device_generate;
+        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);
+
+        // The driver has caught up and the device hasn't generated more data,
+        // so no interrupt is needed.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
+
+        // Assume the driver submits one more request and the device handles
+        // it, incrementing self.next_used.
+        queue.add_used(&mem, 0x0, BUFFER_LEN);
+        device_generate += Wrapping(1);
+
+        // The driver has acknowledged all previous used buffers, so an
+        // interrupt should be injected again.
+        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
+    }
+}
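
The wrap-around behaviour exercised above follows the virtio event-index rule:
with VIRTIO_RING_F_EVENT_IDX negotiated, the device signals the guest only when
avail.used_event falls inside the window of used-ring entries added since the
last notification, with all indices compared modulo 2^16. The snippet below is
a minimal standalone sketch of that spec-level check, not crosvm's
Queue::available_interrupt_enabled implementation; the helper name
need_interrupt and the sample assertions are illustrative assumptions.

use std::num::Wrapping;

// Spec-level event-index check (vring_need_event, illustrative only): an
// interrupt is needed iff used_event lies in (old_used, new_used], with the
// comparison performed modulo 2^16 via Wrapping(u16).
fn need_interrupt(
    used_event: Wrapping<u16>,
    new_used: Wrapping<u16>,
    old_used: Wrapping<u16>,
) -> bool {
    (new_used - used_event - Wrapping(1)).0 < (new_used - old_used).0
}

fn main() {
    // Driver caught up at 0x100 and no new used entries: suppressed.
    assert!(!need_interrupt(Wrapping(0x100), Wrapping(0x100), Wrapping(0x100)));
    // next_used wrapped from u16::MAX to 0 while used_event == u16::MAX: signal.
    assert!(need_interrupt(
        Wrapping(u16::max_value()),
        Wrapping(0),
        Wrapping(u16::max_value())
    ));
    // Driver only caught up to 0x80 after a notification at 0x100: suppressed.
    assert!(!need_interrupt(Wrapping(0x80), Wrapping(0x101), Wrapping(0x100)));
}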