mirror of https://chromium.googlesource.com/crosvm/crosvm
data_model: fix flaky observe_mutate
The original version of this test used sleeps, retries, and vague commentary about how the test is made reliable. The new version of the test uses real synchronization primitives.

BUG=chromium:1045426
TEST=put all cores under load; cargo test -p data_model

Change-Id: I7fa4ac45a9003e2ebb98c57ca6a03be17bdf65cf
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2029925
Tested-by: Zach Reizner <zachr@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Auto-Submit: Zach Reizner <zachr@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
Commit-Queue: Zach Reizner <zachr@chromium.org>
commit b427411b64
parent 8b8c01e1ad
1 changed file with 11 additions and 24 deletions
@@ -401,9 +401,8 @@ impl<'a, T: DataInit> VolatileRef<'a, T> {
 mod tests {
     use super::*;
 
-    use std::sync::Arc;
-    use std::thread::{sleep, spawn};
-    use std::time::Duration;
+    use std::sync::{Arc, Barrier};
+    use std::thread::spawn;
 
     #[derive(Clone)]
     struct VecMem {
@@ -472,35 +471,23 @@ mod tests {
         let a_clone = a.clone();
         let v_ref = a.get_ref::<u8>(0).unwrap();
         v_ref.store(99);
+
+        let start_barrier = Arc::new(Barrier::new(2));
+        let thread_start_barrier = start_barrier.clone();
+        let end_barrier = Arc::new(Barrier::new(2));
+        let thread_end_barrier = end_barrier.clone();
         spawn(move || {
-            sleep(Duration::from_millis(10));
+            thread_start_barrier.wait();
             let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
             clone_v_ref.store(0);
+            thread_end_barrier.wait();
         });
 
-        // Technically this is a race condition but we have to observe the v_ref's value changing
-        // somehow and this helps to ensure the sleep actually happens before the store rather then
-        // being reordered by the compiler.
         assert_eq!(v_ref.load(), 99);
 
-        // Granted we could have a machine that manages to perform this many volatile loads in the
-        // amount of time the spawned thread sleeps, but the most likely reason the retry limit will
-        // get reached is because v_ref.load() is not actually performing the required volatile read
-        // or v_ref.store() is not doing a volatile write. A timer based solution was avoided
-        // because that might use a syscall which could hint the optimizer to reload v_ref's pointer
-        // regardless of volatile status. Note that we use a longer retry duration for optimized
-        // builds.
-        #[cfg(debug_assertions)]
-        const RETRY_MAX: u64 = 500_000_000;
-        #[cfg(not(debug_assertions))]
-        const RETRY_MAX: u64 = 10_000_000_000;
-
-        let mut retry = 0;
-        while v_ref.load() == 99 && retry < RETRY_MAX {
-            retry += 1;
-        }
+        start_barrier.wait();
+        end_barrier.wait();
 
-        assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
         assert_eq!(v_ref.load(), 0);
     }