Move hypervisor::kvm tests to integration tests

These tests require KVM to run and are therefore not suitable as unit
tests.

In order to move the tests, KvmVm::create_vcpu had to be renamed to
create_kvm_vcpu to prevent conflicts with the trait functions
VmAArch64::create_vcpu and VmX86_64::create_vcpu, implemented for the
same type.
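
To make the conflict concrete, here is a minimal sketch (simplified,
hypothetical types, not the real crosvm definitions). Rust resolves
method calls to inherent methods before trait methods, so a public
inherent create_vcpu on KvmVm would shadow the same-named trait
method for callers; giving the inherent method a distinct name keeps
both reachable while the trait impls delegate to it.

    // Minimal sketch with made-up, simplified types.
    struct KvmVcpu;
    struct KvmVm;

    trait VmX86_64 {
        fn create_vcpu(&self, id: usize) -> Box<KvmVcpu>;
    }

    impl KvmVm {
        // Renamed from `create_vcpu`: if this stayed `create_vcpu` and became
        // public, `vm.create_vcpu(0)` would resolve here and hide the trait
        // method below.
        fn create_kvm_vcpu(&self, _id: usize) -> KvmVcpu {
            KvmVcpu
        }
    }

    impl VmX86_64 for KvmVm {
        fn create_vcpu(&self, id: usize) -> Box<KvmVcpu> {
            // Both arch-specific trait impls can delegate to the one concrete
            // implementation.
            Box::new(self.create_kvm_vcpu(id))
        }
    }

    fn main() {
        let vm = KvmVm;
        let _from_trait: Box<KvmVcpu> = vm.create_vcpu(0); // trait method
        let _inherent: KvmVcpu = vm.create_kvm_vcpu(0); // inherent method
    }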

The aarch64 variant does not actually run in CI since the tests
do not pass in the emulated aarch64 environment. So the
DO_NOT_RUN_AARCH64 flag remains.

BUG=b:244623454
TEST=presubmit

Change-Id: I79bba3926a38d19350e2fd3c7bfa4662499223e5
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3999799
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Commit-Queue: Dennis Kempin <denniskempin@google.com>
Dennis Kempin 2022-11-02 21:35:20 +00:00 committed by crosvm LUCI
parent b50f6580a2
commit 3375878f14
7 changed files with 806 additions and 782 deletions


@@ -195,8 +195,8 @@ impl VmAArch64 for KvmVm {
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
// create_vcpu is declared separately in VmAArch64 and VmX86, so it can return VcpuAArch64
// or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
Ok(Box::new(KvmVm::create_vcpu(self, id)?))
// or VcpuX86. But both use the same implementation in KvmVm::create_kvm_vcpu.
Ok(Box::new(self.create_kvm_vcpu(id)?))
}
}
@@ -826,57 +826,3 @@ pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
}
}
}
#[cfg(test)]
mod tests {
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use super::super::Kvm;
use super::*;
use crate::IrqRoute;
use crate::IrqSource;
use crate::IrqSourceChip;
#[test]
fn set_gsi_routing() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
vm.set_gsi_routing(&[]).unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Gic,
pin: 3,
},
}])
.unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
}])
.unwrap();
vm.set_gsi_routing(&[
IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Gic,
pin: 3,
},
},
IrqRoute {
gsi: 2,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
},
])
.unwrap();
}
}


@@ -5,7 +5,7 @@
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use aarch64::*;
pub use aarch64::*;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod x86_64;
@@ -68,7 +68,7 @@ use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use x86_64::*;
pub use x86_64::*;
use crate::ClockState;
use crate::Config;
@@ -249,7 +249,7 @@ impl KvmVm {
Ok(vm)
}
fn create_vcpu(&self, id: usize) -> Result<KvmVcpu> {
pub fn create_kvm_vcpu(&self, id: usize) -> Result<KvmVcpu> {
let run_mmap_size = self.kvm.get_vcpu_mmap_size()?;
// Safe because we know that our file is a VM fd and we verify the return result.
@@ -436,7 +436,7 @@ impl KvmVm {
}
/// Checks whether a particular KVM-specific capability is available for this VM.
fn check_raw_capability(&self, capability: KvmCap) -> bool {
pub fn check_raw_capability(&self, capability: KvmCap) -> bool {
// Safe because we know that our file is a KVM fd, and if the cap is invalid KVM assumes
// it's an unavailable extension and returns 0.
unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), capability as c_ulong) == 1 }
@@ -1287,301 +1287,3 @@ impl From<&MPState> for kvm_mp_state {
}
}
}
#[cfg(test)]
mod tests {
use std::thread;
use base::pagesize;
use base::FromRawDescriptor;
use base::MemoryMappingArena;
use base::MemoryMappingBuilder;
use vm_memory::GuestAddress;
use super::*;
#[test]
fn dirty_log_size() {
let page_size = pagesize();
assert_eq!(dirty_log_bitmap_size(0), 0);
assert_eq!(dirty_log_bitmap_size(page_size), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8 + 1), 2);
assert_eq!(dirty_log_bitmap_size(page_size * 100), 13);
}
#[test]
fn new() {
Kvm::new().unwrap();
}
#[test]
fn check_capability() {
let kvm = Kvm::new().unwrap();
assert!(kvm.check_capability(HypervisorCap::UserMemory));
assert!(!kvm.check_capability(HypervisorCap::S390UserSigp));
}
#[test]
fn create_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
KvmVm::new(&kvm, gm, Default::default()).unwrap();
}
#[test]
fn clone_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.try_clone().unwrap();
}
#[test]
fn send_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
thread::spawn(move || {
let _vm = vm;
})
.join()
.unwrap();
}
#[test]
fn check_vm_capability() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.check_raw_capability(KvmCap::UserMemory));
// I assume nobody is testing this on s390
assert!(!vm.check_raw_capability(KvmCap::S390UserSigp));
}
#[test]
fn create_vcpu() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_vcpu(0).unwrap();
}
#[test]
fn get_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let obj_addr = GuestAddress(0xf0);
vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
assert_eq!(read_val, 67u8);
}
#[test]
fn add_memory() {
let kvm = Kvm::new().unwrap();
let gm =
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false)
.unwrap();
}
#[test]
fn add_memory_ro() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false)
.unwrap();
}
#[test]
fn remove_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
let mem_ptr = mem.as_ptr();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let removed_mem = vm.remove_memory_region(slot).unwrap();
assert_eq!(removed_mem.size(), mem_size);
assert_eq!(removed_mem.as_ptr(), mem_ptr);
}
#[test]
fn remove_invalid_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.remove_memory_region(0).is_err());
}
#[test]
fn overlap_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x2000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
assert!(vm
.add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false)
.is_err());
}
#[test]
fn sync_memory() {
let kvm = Kvm::new().unwrap();
let gm =
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingArena::new(mem_size).unwrap();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
vm.msync_memory_region(slot, mem_size, 0).unwrap();
assert!(vm.msync_memory_region(slot, mem_size + 1, 0).is_err());
assert!(vm.msync_memory_region(slot + 1, mem_size, 0).is_err());
}
#[test]
fn register_irqfd() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
let evtfd3 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, None).unwrap();
vm.register_irqfd(8, &evtfd2, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap_err();
}
#[test]
fn unregister_irqfd() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
let evtfd3 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, None).unwrap();
vm.register_irqfd(8, &evtfd2, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap();
vm.unregister_irqfd(4, &evtfd1).unwrap();
vm.unregister_irqfd(8, &evtfd2).unwrap();
vm.unregister_irqfd(4, &evtfd3).unwrap();
}
#[test]
fn irqfd_resample() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, Some(&evtfd2)).unwrap();
vm.unregister_irqfd(4, &evtfd1).unwrap();
// Ensures the ioctl is actually reading the resamplefd.
vm.register_irqfd(4, &evtfd1, Some(unsafe { &Event::from_raw_descriptor(-1) }))
.unwrap_err();
}
#[test]
fn set_signal_mask() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
vcpu.set_signal_mask(&[base::SIGRTMIN() + 0]).unwrap();
}
#[test]
fn vcpu_mmap_size() {
let kvm = Kvm::new().unwrap();
let mmap_size = kvm.get_vcpu_mmap_size().unwrap();
let page_size = pagesize();
assert!(mmap_size >= page_size);
assert!(mmap_size % page_size == 0);
}
#[test]
fn register_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc1),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc2),
Datamatch::U16(Some(0x1337u16)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc4),
Datamatch::U32(Some(0xdeadbeefu32)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc8),
Datamatch::U64(Some(0xdeadbeefdeadbeefu64)),
)
.unwrap();
}
#[test]
fn unregister_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.unregister_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(
&evtfd,
IoEventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
}
}


@@ -60,7 +60,7 @@ use crate::NUM_IOAPIC_PINS;
type KvmCpuId = kvm::CpuId;
fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
descriptor: &T,
kind: IoctlNr,
initial_capacity: usize,
@@ -477,7 +477,7 @@ impl VmX86_64 for KvmVm {
fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
// create_vcpu is declared separately in VmAArch64 and VmX86, so it can return VcpuAArch64
// or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
Ok(Box::new(KvmVm::create_vcpu(self, id)?))
Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
}
/// Sets the address of the three-page region in the VM's address space.
@@ -1382,423 +1382,3 @@ fn to_kvm_msrs(vec: &[Register]) -> Vec<kvm_msrs> {
msrs[0].nmsrs = vec.len() as u32;
msrs
}
#[cfg(test)]
mod tests {
use libc::EINVAL;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use super::*;
use crate::DeliveryMode;
use crate::DeliveryStatus;
use crate::DestinationMode;
use crate::Hypervisor;
use crate::HypervisorCap;
use crate::HypervisorX86_64;
use crate::IoapicRedirectionTableEntry;
use crate::IoapicState;
use crate::IrqRoute;
use crate::IrqSource;
use crate::IrqSourceChip;
use crate::LapicState;
use crate::PicInitState;
use crate::PicState;
use crate::PitChannelState;
use crate::PitRWMode;
use crate::PitRWState;
use crate::PitState;
use crate::TriggerMode;
use crate::Vcpu;
use crate::Vm;
#[test]
fn get_supported_cpuid() {
let hypervisor = Kvm::new().unwrap();
let cpuid = hypervisor.get_supported_cpuid().unwrap();
assert!(cpuid.cpu_id_entries.len() > 0);
}
#[test]
fn get_emulated_cpuid() {
let hypervisor = Kvm::new().unwrap();
let cpuid = hypervisor.get_emulated_cpuid().unwrap();
assert!(cpuid.cpu_id_entries.len() > 0);
}
#[test]
fn get_msr_index_list() {
let kvm = Kvm::new().unwrap();
let msr_list = kvm.get_msr_index_list().unwrap();
assert!(msr_list.len() >= 2);
}
#[test]
fn entries_double_on_error() {
let hypervisor = Kvm::new().unwrap();
let cpuid =
get_cpuid_with_initial_capacity(&hypervisor, KVM_GET_SUPPORTED_CPUID(), 4).unwrap();
assert!(cpuid.cpu_id_entries.len() > 4);
}
#[test]
fn check_vm_arch_capability() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.check_capability(VmCap::PvClock));
}
#[test]
fn pic_state() {
let state = PicState {
last_irr: 0b00000001,
irr: 0b00000010,
imr: 0b00000100,
isr: 0b00001000,
priority_add: 0b00010000,
irq_base: 0b00100000,
read_reg_select: false,
poll: true,
special_mask: true,
init_state: PicInitState::Icw3,
auto_eoi: true,
rotate_on_auto_eoi: false,
special_fully_nested_mode: true,
use_4_byte_icw: true,
elcr: 0b01000000,
elcr_mask: 0b10000000,
};
let kvm_state = kvm_pic_state::from(&state);
assert_eq!(kvm_state.last_irr, 0b00000001);
assert_eq!(kvm_state.irr, 0b00000010);
assert_eq!(kvm_state.imr, 0b00000100);
assert_eq!(kvm_state.isr, 0b00001000);
assert_eq!(kvm_state.priority_add, 0b00010000);
assert_eq!(kvm_state.irq_base, 0b00100000);
assert_eq!(kvm_state.read_reg_select, 0);
assert_eq!(kvm_state.poll, 1);
assert_eq!(kvm_state.special_mask, 1);
assert_eq!(kvm_state.init_state, 0b10);
assert_eq!(kvm_state.auto_eoi, 1);
assert_eq!(kvm_state.rotate_on_auto_eoi, 0);
assert_eq!(kvm_state.special_fully_nested_mode, 1);
assert_eq!(kvm_state.auto_eoi, 1);
assert_eq!(kvm_state.elcr, 0b01000000);
assert_eq!(kvm_state.elcr_mask, 0b10000000);
let orig_state = PicState::from(&kvm_state);
assert_eq!(state, orig_state);
}
#[test]
fn ioapic_state() {
let mut entry = IoapicRedirectionTableEntry::default();
let noredir = IoapicRedirectionTableEntry::default();
// default entry should be 0
assert_eq!(entry.get(0, 64), 0);
// set some values on our entry
entry.set_vector(0b11111111);
entry.set_delivery_mode(DeliveryMode::SMI);
entry.set_dest_mode(DestinationMode::Physical);
entry.set_delivery_status(DeliveryStatus::Pending);
entry.set_polarity(1);
entry.set_remote_irr(true);
entry.set_trigger_mode(TriggerMode::Level);
entry.set_interrupt_mask(true);
entry.set_dest_id(0b10101010);
// Bit repr as: destid-reserved--------------------------------flags----vector--
let bit_repr = 0b1010101000000000000000000000000000000000000000011111001011111111;
// where flags is [interrupt_mask(1), trigger_mode(Level=1), remote_irr(1), polarity(1),
// delivery_status(Pending=1), dest_mode(Physical=0), delivery_mode(SMI=010)]
assert_eq!(entry.get(0, 64), bit_repr);
let mut state = IoapicState {
base_address: 1,
ioregsel: 2,
ioapicid: 4,
current_interrupt_level_bitmap: 8,
redirect_table: [noredir; 120],
};
// Initialize first 24 (kvm_state limit) redirection entries
for i in 0..24 {
state.redirect_table[i] = entry;
}
let kvm_state = kvm_ioapic_state::from(&state);
assert_eq!(kvm_state.base_address, 1);
assert_eq!(kvm_state.ioregsel, 2);
assert_eq!(kvm_state.id, 4);
assert_eq!(kvm_state.irr, 8);
assert_eq!(kvm_state.pad, 0);
// check first 24 entries
for i in 0..24 {
assert_eq!(unsafe { kvm_state.redirtbl[i].bits }, bit_repr);
}
// compare with a conversion back
assert_eq!(state, IoapicState::from(&kvm_state));
}
#[test]
fn lapic_state() {
let mut state = LapicState { regs: [0; 64] };
// Apic id register, 4 bytes each with a different bit set
state.regs[2] = 1 | 2 << 8 | 4 << 16 | 8 << 24;
let kvm_state = kvm_lapic_state::from(&state);
// check little endian bytes in kvm_state
for i in 0..4 {
assert_eq!(kvm_state.regs[32 + i] as u8, 2u8.pow(i as u32));
}
// Test converting back to a LapicState
assert_eq!(state, LapicState::from(&kvm_state));
}
#[test]
fn pit_state() {
let channel = PitChannelState {
count: 256,
latched_count: 512,
count_latched: PitRWState::LSB,
status_latched: false,
status: 7,
read_state: PitRWState::MSB,
write_state: PitRWState::Word1,
reload_value: 8,
rw_mode: PitRWMode::Both,
mode: 5,
bcd: false,
gate: true,
count_load_time: 1024,
};
let kvm_channel = kvm_pit_channel_state::from(&channel);
// compare the various field translations
assert_eq!(kvm_channel.count, 256);
assert_eq!(kvm_channel.latched_count, 512);
assert_eq!(kvm_channel.count_latched, 1);
assert_eq!(kvm_channel.status_latched, 0);
assert_eq!(kvm_channel.status, 7);
assert_eq!(kvm_channel.read_state, 2);
assert_eq!(kvm_channel.write_state, 4);
assert_eq!(kvm_channel.write_latch, 8);
assert_eq!(kvm_channel.rw_mode, 3);
assert_eq!(kvm_channel.mode, 5);
assert_eq!(kvm_channel.bcd, 0);
assert_eq!(kvm_channel.gate, 1);
assert_eq!(kvm_channel.count_load_time, 1024);
// convert back and compare
assert_eq!(channel, PitChannelState::from(&kvm_channel));
// convert the full pitstate
let state = PitState {
channels: [channel, channel, channel],
flags: 255,
};
let kvm_state = kvm_pit_state2::from(&state);
assert_eq!(kvm_state.flags, 255);
// compare a channel
assert_eq!(channel, PitChannelState::from(&kvm_state.channels[0]));
// convert back and compare
assert_eq!(state, PitState::from(&kvm_state));
}
#[test]
fn clock_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mut clock_data = vm.get_pvclock().unwrap();
clock_data.clock += 1000;
vm.set_pvclock(&clock_data).unwrap();
}
#[test]
fn set_gsi_routing() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
vm.set_gsi_routing(&[]).unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Ioapic,
pin: 3,
},
}])
.unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
}])
.unwrap();
vm.set_gsi_routing(&[
IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Ioapic,
pin: 3,
},
},
IrqRoute {
gsi: 2,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
},
])
.unwrap();
}
#[test]
fn set_identity_map_addr() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.set_identity_map_addr(GuestAddress(0x20000)).unwrap();
}
#[test]
fn mp_state() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let state = vcpu.get_mp_state().unwrap();
vcpu.set_mp_state(&state).unwrap();
}
#[test]
fn enable_feature() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
unsafe { vcpu.enable_raw_capability(kvm_sys::KVM_CAP_HYPERV_SYNIC, &[0; 4]) }.unwrap();
}
#[test]
fn from_fpu() {
// Fpu has the largest arrays in our struct adapters. Test that they're small enough for
// Rust to copy.
let mut fpu: Fpu = Default::default();
let m = fpu.xmm.len();
let n = fpu.xmm[0].len();
fpu.xmm[m - 1][n - 1] = 42;
let fpu = kvm_fpu::from(&fpu);
assert_eq!(fpu.xmm.len(), m);
assert_eq!(fpu.xmm[0].len(), n);
assert_eq!(fpu.xmm[m - 1][n - 1], 42);
}
#[test]
fn debugregs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut dregs = vcpu.get_debugregs().unwrap();
dregs.dr7 = 13;
vcpu.set_debugregs(&dregs).unwrap();
let dregs2 = vcpu.get_debugregs().unwrap();
assert_eq!(dregs.dr7, dregs2.dr7);
}
#[test]
fn xcrs() {
let kvm = Kvm::new().unwrap();
if !kvm.check_capability(HypervisorCap::Xcrs) {
return;
}
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut xcrs = vcpu.get_xcrs().unwrap();
xcrs[0].value = 1;
vcpu.set_xcrs(&xcrs).unwrap();
let xcrs2 = vcpu.get_xcrs().unwrap();
assert_eq!(xcrs[0].value, xcrs2[0].value);
}
#[test]
fn get_msrs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut msrs = vec![
// This one should succeed
Register {
id: 0x0000011e,
..Default::default()
},
// This one will fail to fetch
Register {
id: 0x000003f1,
..Default::default()
},
];
vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(msrs.len(), 1);
}
#[test]
fn set_msrs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
const MSR_TSC_AUX: u32 = 0xc0000103;
let mut msrs = vec![Register {
id: MSR_TSC_AUX,
value: 42,
}];
vcpu.set_msrs(&msrs).unwrap();
msrs[0].value = 0;
vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(msrs.len(), 1);
assert_eq!(msrs[0].id, MSR_TSC_AUX);
assert_eq!(msrs[0].value, 42);
}
#[test]
fn get_hyperv_cpuid() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let cpuid = vcpu.get_hyperv_cpuid();
// Older kernels don't support so tolerate this kind of failure.
match cpuid {
Ok(_) => {}
Err(e) => {
assert_eq!(e.errno(), EINVAL);
}
}
}
}


@@ -0,0 +1,53 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use hypervisor::kvm::*;
use hypervisor::*;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[test]
fn set_gsi_routing() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
vm.set_gsi_routing(&[]).unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Gic,
pin: 3,
},
}])
.unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
}])
.unwrap();
vm.set_gsi_routing(&[
IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Gic,
pin: 3,
},
},
IrqRoute {
gsi: 2,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
},
])
.unwrap();
}


@@ -0,0 +1,320 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(unix)]
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod x86_64;
use std::thread;
use base::pagesize;
use base::Event;
use base::FromRawDescriptor;
use base::MappedRegion;
use base::MemoryMappingArena;
use base::MemoryMappingBuilder;
use hypervisor::kvm::dirty_log_bitmap_size;
use hypervisor::kvm::Kvm;
use hypervisor::kvm::KvmVm;
use hypervisor::Datamatch;
use hypervisor::Hypervisor;
use hypervisor::HypervisorCap;
use hypervisor::IoEventAddress;
use hypervisor::Vm;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use hypervisor::VmAArch64;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use hypervisor::VmX86_64;
use kvm::Cap;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[test]
fn dirty_log_size() {
let page_size = pagesize();
assert_eq!(dirty_log_bitmap_size(0), 0);
assert_eq!(dirty_log_bitmap_size(page_size), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8 + 1), 2);
assert_eq!(dirty_log_bitmap_size(page_size * 100), 13);
}
#[test]
fn new() {
Kvm::new().unwrap();
}
#[test]
fn check_capability() {
let kvm = Kvm::new().unwrap();
assert!(kvm.check_capability(HypervisorCap::UserMemory));
assert!(!kvm.check_capability(HypervisorCap::S390UserSigp));
}
#[test]
fn create_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
KvmVm::new(&kvm, gm, Default::default()).unwrap();
}
#[test]
fn clone_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.try_clone().unwrap();
}
#[test]
fn send_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
thread::spawn(move || {
let _vm = vm;
})
.join()
.unwrap();
}
#[test]
fn check_vm_capability() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.check_raw_capability(Cap::UserMemory));
// I assume nobody is testing this on s390
assert!(!vm.check_raw_capability(Cap::S390UserSigp));
}
#[test]
fn create_vcpu() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_vcpu(0).unwrap();
}
#[test]
fn get_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let obj_addr = GuestAddress(0xf0);
vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
assert_eq!(read_val, 67u8);
}
#[test]
fn add_memory() {
let kvm = Kvm::new().unwrap();
let gm =
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false)
.unwrap();
}
#[test]
fn add_memory_ro() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false)
.unwrap();
}
#[test]
fn remove_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
let mem_ptr = mem.as_ptr();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let removed_mem = vm.remove_memory_region(slot).unwrap();
assert_eq!(removed_mem.size(), mem_size);
assert_eq!(removed_mem.as_ptr(), mem_ptr);
}
#[test]
fn remove_invalid_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.remove_memory_region(0).is_err());
}
#[test]
fn overlap_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x2000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
assert!(vm
.add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false)
.is_err());
}
#[test]
fn sync_memory() {
let kvm = Kvm::new().unwrap();
let gm =
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingArena::new(mem_size).unwrap();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
vm.msync_memory_region(slot, mem_size, 0).unwrap();
assert!(vm.msync_memory_region(slot, mem_size + 1, 0).is_err());
assert!(vm.msync_memory_region(slot + 1, mem_size, 0).is_err());
}
#[test]
fn register_irqfd() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
let evtfd3 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, None).unwrap();
vm.register_irqfd(8, &evtfd2, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap_err();
}
#[test]
fn unregister_irqfd() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
let evtfd3 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, None).unwrap();
vm.register_irqfd(8, &evtfd2, None).unwrap();
vm.register_irqfd(4, &evtfd3, None).unwrap();
vm.unregister_irqfd(4, &evtfd1).unwrap();
vm.unregister_irqfd(8, &evtfd2).unwrap();
vm.unregister_irqfd(4, &evtfd3).unwrap();
}
#[test]
fn irqfd_resample() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd(4, &evtfd1, Some(&evtfd2)).unwrap();
vm.unregister_irqfd(4, &evtfd1).unwrap();
// Ensures the ioctl is actually reading the resamplefd.
vm.register_irqfd(4, &evtfd1, Some(unsafe { &Event::from_raw_descriptor(-1) }))
.unwrap_err();
}
#[test]
fn set_signal_mask() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
vcpu.set_signal_mask(&[base::SIGRTMIN() + 0]).unwrap();
}
#[test]
fn vcpu_mmap_size() {
let kvm = Kvm::new().unwrap();
let mmap_size = kvm.get_vcpu_mmap_size().unwrap();
let page_size = pagesize();
assert!(mmap_size >= page_size);
assert!(mmap_size % page_size == 0);
}
#[test]
fn register_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc1),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc2),
Datamatch::U16(Some(0x1337u16)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc4),
Datamatch::U32(Some(0xdeadbeefu32)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Pio(0xc8),
Datamatch::U64(Some(0xdeadbeefdeadbeefu64)),
)
.unwrap();
}
#[test]
fn unregister_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoEventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.unregister_ioevent(&evtfd, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(&evtfd, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(
&evtfd,
IoEventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
}


@@ -0,0 +1,425 @@
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use hypervisor::kvm::get_cpuid_with_initial_capacity;
use hypervisor::kvm::Kvm;
use hypervisor::kvm::KvmVcpu;
use hypervisor::kvm::KvmVm;
use hypervisor::DeliveryMode;
use hypervisor::DeliveryStatus;
use hypervisor::DestinationMode;
use hypervisor::Fpu;
use hypervisor::Hypervisor;
use hypervisor::HypervisorCap;
use hypervisor::HypervisorX86_64;
use hypervisor::IoapicRedirectionTableEntry;
use hypervisor::IoapicState;
use hypervisor::IrqRoute;
use hypervisor::IrqSource;
use hypervisor::IrqSourceChip;
use hypervisor::LapicState;
use hypervisor::PicInitState;
use hypervisor::PicState;
use hypervisor::PitChannelState;
use hypervisor::PitRWMode;
use hypervisor::PitRWState;
use hypervisor::PitState;
use hypervisor::Register;
use hypervisor::TriggerMode;
use hypervisor::Vm;
use hypervisor::VmCap;
use hypervisor::VmX86_64;
use kvm_sys::*;
use libc::EINVAL;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[test]
fn get_supported_cpuid() {
let hypervisor = Kvm::new().unwrap();
let cpuid = hypervisor.get_supported_cpuid().unwrap();
assert!(cpuid.cpu_id_entries.len() > 0);
}
#[test]
fn get_emulated_cpuid() {
let hypervisor = Kvm::new().unwrap();
let cpuid = hypervisor.get_emulated_cpuid().unwrap();
assert!(cpuid.cpu_id_entries.len() > 0);
}
#[test]
fn get_msr_index_list() {
let kvm = Kvm::new().unwrap();
let msr_list = kvm.get_msr_index_list().unwrap();
assert!(msr_list.len() >= 2);
}
#[test]
fn entries_double_on_error() {
let hypervisor = Kvm::new().unwrap();
let cpuid = get_cpuid_with_initial_capacity(&hypervisor, KVM_GET_SUPPORTED_CPUID(), 4).unwrap();
assert!(cpuid.cpu_id_entries.len() > 4);
}
#[test]
fn check_vm_arch_capability() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
assert!(vm.check_capability(VmCap::PvClock));
}
#[test]
fn pic_state() {
let state = PicState {
last_irr: 0b00000001,
irr: 0b00000010,
imr: 0b00000100,
isr: 0b00001000,
priority_add: 0b00010000,
irq_base: 0b00100000,
read_reg_select: false,
poll: true,
special_mask: true,
init_state: PicInitState::Icw3,
auto_eoi: true,
rotate_on_auto_eoi: false,
special_fully_nested_mode: true,
use_4_byte_icw: true,
elcr: 0b01000000,
elcr_mask: 0b10000000,
};
let kvm_state = kvm_pic_state::from(&state);
assert_eq!(kvm_state.last_irr, 0b00000001);
assert_eq!(kvm_state.irr, 0b00000010);
assert_eq!(kvm_state.imr, 0b00000100);
assert_eq!(kvm_state.isr, 0b00001000);
assert_eq!(kvm_state.priority_add, 0b00010000);
assert_eq!(kvm_state.irq_base, 0b00100000);
assert_eq!(kvm_state.read_reg_select, 0);
assert_eq!(kvm_state.poll, 1);
assert_eq!(kvm_state.special_mask, 1);
assert_eq!(kvm_state.init_state, 0b10);
assert_eq!(kvm_state.auto_eoi, 1);
assert_eq!(kvm_state.rotate_on_auto_eoi, 0);
assert_eq!(kvm_state.special_fully_nested_mode, 1);
assert_eq!(kvm_state.auto_eoi, 1);
assert_eq!(kvm_state.elcr, 0b01000000);
assert_eq!(kvm_state.elcr_mask, 0b10000000);
let orig_state = PicState::from(&kvm_state);
assert_eq!(state, orig_state);
}
#[test]
fn ioapic_state() {
let mut entry = IoapicRedirectionTableEntry::default();
let noredir = IoapicRedirectionTableEntry::default();
// default entry should be 0
assert_eq!(entry.get(0, 64), 0);
// set some values on our entry
entry.set_vector(0b11111111);
entry.set_delivery_mode(DeliveryMode::SMI);
entry.set_dest_mode(DestinationMode::Physical);
entry.set_delivery_status(DeliveryStatus::Pending);
entry.set_polarity(1);
entry.set_remote_irr(true);
entry.set_trigger_mode(TriggerMode::Level);
entry.set_interrupt_mask(true);
entry.set_dest_id(0b10101010);
// Bit repr as: destid-reserved--------------------------------flags----vector--
let bit_repr = 0b1010101000000000000000000000000000000000000000011111001011111111;
// where flags is [interrupt_mask(1), trigger_mode(Level=1), remote_irr(1), polarity(1),
// delivery_status(Pending=1), dest_mode(Physical=0), delivery_mode(SMI=010)]
assert_eq!(entry.get(0, 64), bit_repr);
let mut state = IoapicState {
base_address: 1,
ioregsel: 2,
ioapicid: 4,
current_interrupt_level_bitmap: 8,
redirect_table: [noredir; 120],
};
// Initialize first 24 (kvm_state limit) redirection entries
for i in 0..24 {
state.redirect_table[i] = entry;
}
let kvm_state = kvm_ioapic_state::from(&state);
assert_eq!(kvm_state.base_address, 1);
assert_eq!(kvm_state.ioregsel, 2);
assert_eq!(kvm_state.id, 4);
assert_eq!(kvm_state.irr, 8);
assert_eq!(kvm_state.pad, 0);
// check first 24 entries
for i in 0..24 {
assert_eq!(unsafe { kvm_state.redirtbl[i].bits }, bit_repr);
}
// compare with a conversion back
assert_eq!(state, IoapicState::from(&kvm_state));
}
#[test]
fn lapic_state() {
let mut state = LapicState { regs: [0; 64] };
// Apic id register, 4 bytes each with a different bit set
state.regs[2] = 1 | 2 << 8 | 4 << 16 | 8 << 24;
let kvm_state = kvm_lapic_state::from(&state);
// check little endian bytes in kvm_state
for i in 0..4 {
assert_eq!(kvm_state.regs[32 + i] as u8, 2u8.pow(i as u32));
}
// Test converting back to a LapicState
assert_eq!(state, LapicState::from(&kvm_state));
}
#[test]
fn pit_state() {
let channel = PitChannelState {
count: 256,
latched_count: 512,
count_latched: PitRWState::LSB,
status_latched: false,
status: 7,
read_state: PitRWState::MSB,
write_state: PitRWState::Word1,
reload_value: 8,
rw_mode: PitRWMode::Both,
mode: 5,
bcd: false,
gate: true,
count_load_time: 1024,
};
let kvm_channel = kvm_pit_channel_state::from(&channel);
// compare the various field translations
assert_eq!(kvm_channel.count, 256);
assert_eq!(kvm_channel.latched_count, 512);
assert_eq!(kvm_channel.count_latched, 1);
assert_eq!(kvm_channel.status_latched, 0);
assert_eq!(kvm_channel.status, 7);
assert_eq!(kvm_channel.read_state, 2);
assert_eq!(kvm_channel.write_state, 4);
assert_eq!(kvm_channel.write_latch, 8);
assert_eq!(kvm_channel.rw_mode, 3);
assert_eq!(kvm_channel.mode, 5);
assert_eq!(kvm_channel.bcd, 0);
assert_eq!(kvm_channel.gate, 1);
assert_eq!(kvm_channel.count_load_time, 1024);
// convert back and compare
assert_eq!(channel, PitChannelState::from(&kvm_channel));
// convert the full pitstate
let state = PitState {
channels: [channel, channel, channel],
flags: 255,
};
let kvm_state = kvm_pit_state2::from(&state);
assert_eq!(kvm_state.flags, 255);
// compare a channel
assert_eq!(channel, PitChannelState::from(&kvm_state.channels[0]));
// convert back and compare
assert_eq!(state, PitState::from(&kvm_state));
}
#[test]
fn clock_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let mut clock_data = vm.get_pvclock().unwrap();
clock_data.clock += 1000;
vm.set_pvclock(&clock_data).unwrap();
}
#[test]
fn set_gsi_routing() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
vm.set_gsi_routing(&[]).unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Ioapic,
pin: 3,
},
}])
.unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
}])
.unwrap();
vm.set_gsi_routing(&[
IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: IrqSourceChip::Ioapic,
pin: 3,
},
},
IrqRoute {
gsi: 2,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
},
])
.unwrap();
}
#[test]
fn set_identity_map_addr() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.set_identity_map_addr(GuestAddress(0x20000)).unwrap();
}
#[test]
fn mp_state() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
let vcpu: KvmVcpu = vm.create_kvm_vcpu(0).unwrap();
let state = vcpu.get_mp_state().unwrap();
vcpu.set_mp_state(&state).unwrap();
}
#[test]
fn enable_feature() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
vm.create_irq_chip().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
unsafe { vcpu.enable_raw_capability(kvm_sys::KVM_CAP_HYPERV_SYNIC, &[0; 4]) }.unwrap();
}
#[test]
fn from_fpu() {
// Fpu has the largest arrays in our struct adapters. Test that they're small enough for
// Rust to copy.
let mut fpu: Fpu = Default::default();
let m = fpu.xmm.len();
let n = fpu.xmm[0].len();
fpu.xmm[m - 1][n - 1] = 42;
let fpu = kvm_fpu::from(&fpu);
assert_eq!(fpu.xmm.len(), m);
assert_eq!(fpu.xmm[0].len(), n);
assert_eq!(fpu.xmm[m - 1][n - 1], 42);
}
#[test]
fn debugregs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut dregs = vcpu.get_debugregs().unwrap();
dregs.dr7 = 13;
vcpu.set_debugregs(&dregs).unwrap();
let dregs2 = vcpu.get_debugregs().unwrap();
assert_eq!(dregs.dr7, dregs2.dr7);
}
#[test]
fn xcrs() {
let kvm = Kvm::new().unwrap();
if !kvm.check_capability(HypervisorCap::Xcrs) {
return;
}
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut xcrs = vcpu.get_xcrs().unwrap();
xcrs[0].value = 1;
vcpu.set_xcrs(&xcrs).unwrap();
let xcrs2 = vcpu.get_xcrs().unwrap();
assert_eq!(xcrs[0].value, xcrs2[0].value);
}
#[test]
fn get_msrs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let mut msrs = vec![
// This one should succeed
Register {
id: 0x0000011e,
..Default::default()
},
// This one will fail to fetch
Register {
id: 0x000003f1,
..Default::default()
},
];
vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(msrs.len(), 1);
}
#[test]
fn set_msrs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
const MSR_TSC_AUX: u32 = 0xc0000103;
let mut msrs = vec![Register {
id: MSR_TSC_AUX,
value: 42,
}];
vcpu.set_msrs(&msrs).unwrap();
msrs[0].value = 0;
vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(msrs.len(), 1);
assert_eq!(msrs[0].id, MSR_TSC_AUX);
assert_eq!(msrs[0].value, 42);
}
#[test]
fn get_hyperv_cpuid() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = KvmVm::new(&kvm, gm, Default::default()).unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let cpuid = vcpu.get_hyperv_cpuid();
// Older kernels don't support so tolerate this kind of failure.
match cpuid {
Ok(_) => {}
Err(e) => {
assert_eq!(e.errno(), EINVAL);
}
}
}


@@ -85,8 +85,6 @@ CRATE_OPTIONS: Dict[str, List[TestOption]] = {
"fuzz": [TestOption.DO_NOT_BUILD],
"hypervisor": [
TestOption.DO_NOT_RUN_AARCH64,
TestOption.DO_NOT_RUN_ARMHF,
TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL,
], # b/181672912
"e2e_tests": [ # b/180196508
TestOption.LARGE,