virtqueue: Use stronger fences

Acquire/Release orderings are not sufficient when accessing memory
that's also touched by the guest kernel: they still allow a store to
one ring index to be reordered with a later load of another
(store-load reordering), so each side can miss the other's update.
Use SeqCst ordering, which forbids this and also more closely matches
what the kernel does.
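
The failure mode is the classic store-buffering pattern: each side
stores to its own ring index and then loads the other's, and
Acquire/Release fences do not order a store against a later load.  A
minimal litmus sketch of the pattern (toy atomics standing in for the
guest-visible ring fields; illustrative, not crosvm code):

    use std::sync::atomic::{fence, AtomicU16, Ordering};
    use std::thread;

    // Each side publishes its own index, then polls the other's.
    static AVAIL_IDX: AtomicU16 = AtomicU16::new(0); // written by driver
    static USED_IDX: AtomicU16 = AtomicU16::new(0); // written by device

    fn main() {
        let driver = thread::spawn(|| {
            AVAIL_IDX.store(1, Ordering::Relaxed); // publish a new buffer
            // A Release fence here would not order the store above
            // against the load below; only SeqCst forbids that
            // store-load reordering.
            fence(Ordering::SeqCst);
            USED_IDX.load(Ordering::Relaxed) // any completions?
        });
        let device = thread::spawn(|| {
            USED_IDX.store(1, Ordering::Relaxed); // publish a completion
            fence(Ordering::SeqCst);
            AVAIL_IDX.load(Ordering::Relaxed) // any new buffers?
        });
        let (d, v) = (driver.join().unwrap(), device.join().unwrap());
        // With SeqCst fences at least one side must see the other's
        // store.  With Acquire/Release, (0, 0) is allowed, and each
        // side then waits for a notification the other decided not to
        // send.
        assert!(d == 1 || v == 1);
    }

On x86, Acquire and Release fences compile to no instruction at all
while the hardware freely reorders stores with later loads, so only
the SeqCst fence (a real full barrier, like the kernel's virt_mb())
closes the race.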

BUG=b:200637442
TEST=Run vm.Virtiofs 15 times in a row both with and without
     VIRTIO_RING_F_EVENT_IDX enabled and observe that the test does not
     hang

Change-Id: I54f1d7123bdcbbf01f94935193e48a9c2e252bbc
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3199301
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Keiichi Watanabe <keiichiw@chromium.org>
Commit-Queue: Chirantan Ekbote <chirantan@chromium.org>
Author: Chirantan Ekbote, 2021-10-01 17:06:57 +09:00 (committed by Commit Bot)
parent e7b6f4fe6e
commit b9917a22a3

@@ -329,12 +329,11 @@ impl Queue {
     // All available ring entries between `self.next_avail` and `get_avail_index()` are available
     // to be processed by the device.
     fn get_avail_index(&self, mem: &GuestMemory) -> Wrapping<u16> {
+        fence(Ordering::SeqCst);
         let avail_index_addr = self.avail_ring.unchecked_add(2);
         let avail_index: u16 = mem.read_obj_from_addr(avail_index_addr).unwrap();
-        // Make sure following reads (e.g. desc_idx) don't pass the avail_index read.
-        fence(Ordering::Acquire);
         Wrapping(avail_index)
     }
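
Note the fence now sits before the load instead of after it: the
property get_avail_index() needs is that the read of the driver's
index cannot complete ahead of the device's own earlier used-ring
stores.  A toy model of that device-side re-check (illustrative names
and atomics; the real methods go through GuestMemory):

    use std::sync::atomic::{fence, AtomicU16, Ordering};

    static AVAIL_IDX: AtomicU16 = AtomicU16::new(0); // driver-owned
    static USED_IDX: AtomicU16 = AtomicU16::new(0); // device-owned

    // Device: publish completions, then re-check for new buffers.
    // Returns true only if it is safe to idle and wait for a kick.
    fn safe_to_idle(next_avail: u16, new_used: u16) -> bool {
        USED_IDX.store(new_used, Ordering::Relaxed); // set_used_index()
        // The SeqCst fence at the top of get_avail_index() does this
        // job: if the load below were satisfied before the store above
        // became visible, the device could idle on buffers the driver
        // has already published while the driver skips the kick.
        fence(Ordering::SeqCst);
        AVAIL_IDX.load(Ordering::Relaxed) == next_avail
    }

    fn main() {
        AVAIL_IDX.store(2, Ordering::Relaxed); // driver published two
        // Device has consumed one buffer; the fenced load sees index 2,
        // so it keeps processing instead of idling.
        assert!(!safe_to_idle(1, 1));
    }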
@@ -345,8 +344,7 @@
     //
     // This value is only used if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
     fn set_avail_event(&mut self, mem: &GuestMemory, avail_index: Wrapping<u16>) {
-        // Ensure that all previous writes are available before this one.
-        fence(Ordering::Release);
+        fence(Ordering::SeqCst);
         let avail_event_addr = self
             .used_ring
@@ -358,12 +356,10 @@
     // Query the value of a single-bit flag in the available ring.
     //
     // Returns `true` if `flag` is currently set (by the driver) in the available ring flags.
     #[allow(dead_code)]
     fn get_avail_flag(&self, mem: &GuestMemory, flag: u16) -> bool {
-        let avail_flags: u16 = mem.read_obj_from_addr(self.avail_ring).unwrap();
-        // Don't allow subsequent reads to be ordered before the avail_flags read.
-        fence(Ordering::Acquire);
+        fence(Ordering::SeqCst);
+        let avail_flags: u16 = mem.read_obj_from_addr(self.avail_ring).unwrap();
         avail_flags & flag == flag
     }
@@ -376,14 +372,13 @@
     //
     // This value is only valid if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
     fn get_used_event(&self, mem: &GuestMemory) -> Wrapping<u16> {
+        fence(Ordering::SeqCst);
         let used_event_addr = self
             .avail_ring
             .unchecked_add(4 + 2 * u64::from(self.actual_size()));
         let used_event: u16 = mem.read_obj_from_addr(used_event_addr).unwrap();
-        // Prevent any reads after this from being ordered before the used_event read.
-        fence(Ordering::Acquire);
         Wrapping(used_event)
     }
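
This function is where the VIRTIO_RING_F_EVENT_IDX hang comes from:
the device writes the used index and then reads used_event to decide
whether to interrupt, while the driver writes used_event and then
re-reads the used index.  If either store-load pair is reordered, both
sides can conclude that no notification is needed.  A hedged toy model
of the handshake (need_event() is the vring_need_event() comparison
from the virtio spec; the atomics stand in for GuestMemory accesses):

    use std::sync::atomic::{fence, AtomicU16, Ordering};

    static USED_IDX: AtomicU16 = AtomicU16::new(1); // device writes
    static USED_EVENT: AtomicU16 = AtomicU16::new(0); // driver writes

    // Interrupt only if `event` falls in the window [old, new) of
    // entries published by this update (vring_need_event()).
    fn need_event(event: u16, new: u16, old: u16) -> bool {
        new.wrapping_sub(event).wrapping_sub(1) < new.wrapping_sub(old)
    }

    // Device: publish completions up to `new`, then decide on an IRQ.
    fn device_publish(old: u16, new: u16) -> bool {
        USED_IDX.store(new, Ordering::Relaxed);
        // Mirrors the SeqCst fence in get_used_event(): a full barrier
        // between the used-index store and the used-event load.
        fence(Ordering::SeqCst);
        need_event(USED_EVENT.load(Ordering::Relaxed), new, old)
    }

    // Driver: arm the next wakeup, then re-check what it might have
    // raced with instead of going to sleep unconditionally.
    fn driver_arm(event: u16) -> u16 {
        USED_EVENT.store(event, Ordering::Relaxed);
        fence(Ordering::SeqCst); // same barrier on the driver side
        USED_IDX.load(Ordering::Relaxed)
    }

    fn main() {
        // Driver consumed entry 1 and arms for the next; the device
        // publishes entry 2.  The fences guarantee the device sees
        // used_event == 1 (and signals) and/or the driver sees
        // used_idx == 2 (and keeps polling); missing both is the hang.
        let seen = driver_arm(1);
        let irq = device_publish(1, 2);
        assert!(irq || seen >= 2);
    }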
@@ -392,8 +387,7 @@
     // This indicates to the driver that all entries up to (but not including) `used_index` have
     // been used by the device and may be processed by the driver.
     fn set_used_index(&mut self, mem: &GuestMemory, used_index: Wrapping<u16>) {
-        // This fence ensures all descriptor writes are visible before the index update.
-        fence(Ordering::Release);
+        fence(Ordering::SeqCst);
         let used_index_addr = self.used_ring.unchecked_add(2);
         mem.write_obj_at_addr(used_index.0, used_index_addr)
@@ -404,8 +398,7 @@
     //
     // Changes the bit specified by the mask in `flag` to `value`.
     fn set_used_flag(&mut self, mem: &GuestMemory, flag: u16, value: bool) {
-        // This fence ensures all descriptor writes are visible before the flag update.
-        fence(Ordering::Release);
+        fence(Ordering::SeqCst);
         let mut used_flags: u16 = mem.read_obj_from_addr(self.used_ring).unwrap();
         if value {