crosvm: add handling for hyperv exits

When features for Hyper-V are enabled there's another type of exit
that can be triggered.  This change attempts to add support for those
types of exits.

BUG=b:150151095
TEST=ran build_test

Change-Id: I3131a2c8d9c610576ac177dbfe82f78e8d5dbfb1
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2073254
Reviewed-by: Matt Delco <delco@chromium.org>
Tested-by: Matt Delco <delco@chromium.org>
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Matt Delco <delco@chromium.org>
Auto-Submit: Matt Delco <delco@chromium.org>
This commit is contained in:
Matt Delco 2020-02-25 18:01:05 -08:00 committed by Commit Bot
parent b9f4c9bca3
commit d2a862b41f
10 changed files with 817 additions and 9 deletions

View file

@ -47,7 +47,7 @@ extern "C" {
* do not indicate anything about what version of crosvm is running.
*/
#define CROSVM_API_MAJOR 0
#define CROSVM_API_MINOR 21
#define CROSVM_API_MINOR 22
#define CROSVM_API_PATCH 0
enum crosvm_address_space {
@ -495,6 +495,16 @@ enum crosvm_vcpu_event_kind {
* a `crosvm_pause_vcpus` call.
*/
CROSVM_VCPU_EVENT_KIND_PAUSED,
/*
* Hyper-V hypercall.
*/
CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL,
/*
* Hyper-V synic change.
*/
CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC,
};
struct crosvm_vcpu_event {
@ -553,6 +563,31 @@ struct crosvm_vcpu_event {
/* CROSVM_VCPU_EVENT_KIND_PAUSED */
void *user;
/* CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL */
struct {
/*
* The |input| and |params| members are populated for the plugin to use.
* The |result| member is populated by the API to point to a uint64_t
* that the plugin should update before resuming.
*/
uint64_t input;
uint64_t *result;
uint64_t params[2];
} hyperv_call;
/* CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC */
struct {
/*
* The |msr|, |control|, |evt_page|, and |msg_page| fields are populated
* for the plugin to use.
*/
uint32_t msr;
uint32_t _reserved;
uint64_t control;
uint64_t evt_page;
uint64_t msg_page;
} hyperv_synic;
uint8_t _reserved[64];
};
};

View file

@ -59,6 +59,8 @@ const CROSVM_IRQ_ROUTE_MSI: u32 = 1;
const CROSVM_VCPU_EVENT_KIND_INIT: u32 = 0;
const CROSVM_VCPU_EVENT_KIND_IO_ACCESS: u32 = 1;
const CROSVM_VCPU_EVENT_KIND_PAUSED: u32 = 2;
const CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL: u32 = 3;
const CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC: u32 = 4;
#[repr(C)]
#[derive(Copy, Clone)]
@ -925,10 +927,30 @@ struct anon_io_access {
__reserved1: [u8; 2],
}
// Mirrors the `hyperv_call` member of `struct crosvm_vcpu_event` in crosvm.h;
// the layout must stay in sync with that C definition.
#[derive(Copy, Clone)]
#[repr(C)]
struct anon_hyperv_call {
    // Hypercall input value reported by the kernel.
    input: u64,
    // Points at an 8-byte buffer that the plugin writes the hypercall result
    // into before resuming (exposed as `uint64_t *result` on the C side).
    result: *mut u8,
    // The two hypercall parameters.
    params: [u64; 2],
}
// Mirrors the `hyperv_synic` member of `struct crosvm_vcpu_event` in crosvm.h;
// the layout must stay in sync with that C definition.
#[derive(Copy, Clone)]
#[repr(C)]
struct anon_hyperv_synic {
    // The Hyper-V MSR involved in the synic state change.
    msr: u32,
    // Padding to keep the following u64 fields 8-byte aligned; always 0.
    reserved: u32,
    control: u64,
    evt_page: u64,
    msg_page: u64,
}
// Payload union of `struct crosvm_vcpu_event`; which member is valid depends
// on the event's `kind` field.
#[repr(C)]
union anon_vcpu_event {
    io_access: anon_io_access,
    user: *mut c_void,
    hyperv_call: anon_hyperv_call,
    hyperv_synic: anon_hyperv_synic,
    // Pins the union to 64 bytes so the C ABI stays stable as members are
    // added.
    #[allow(dead_code)]
    __reserved: [u8; 64],
}
@ -1118,6 +1140,33 @@ impl crosvm_vcpu {
self.sregs.get = false;
self.debugregs.get = false;
Ok(())
} else if wait.has_hyperv_call() {
let hv: &VcpuResponse_Wait_HypervCall = wait.get_hyperv_call();
event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL;
self.resume_data = vec![0; 8];
event.event.hyperv_call = anon_hyperv_call {
input: hv.input,
result: self.resume_data.as_mut_ptr(),
params: [hv.params0, hv.params1],
};
self.regs.get = false;
self.sregs.get = false;
self.debugregs.get = false;
Ok(())
} else if wait.has_hyperv_synic() {
let hv: &VcpuResponse_Wait_HypervSynic = wait.get_hyperv_synic();
event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC;
event.event.hyperv_synic = anon_hyperv_synic {
msr: hv.msr,
reserved: 0,
control: hv.control,
evt_page: hv.evt_page,
msg_page: hv.msg_page,
};
self.regs.get = false;
self.sregs.get = false;
self.debugregs.get = false;
Ok(())
} else {
Err(EPROTO)
}

View file

@ -1124,6 +1124,16 @@ pub enum VcpuExit {
IoapicEoi {
vector: u8,
},
HypervSynic {
msr: u32,
control: u64,
evt_page: u64,
msg_page: u64,
},
HypervHcall {
input: u64,
params: [u64; 2],
},
Unknown,
Exception,
Hypercall,
@ -1243,10 +1253,10 @@ impl Vcpu {
&self.guest_mem
}
/// Sets the data received by an mmio or ioport read/in instruction.
/// Sets the data received by an mmio read, ioport in, or hypercall instruction.
///
/// This function should be called after `Vcpu::run` returns an `VcpuExit::IoIn` or
/// `Vcpu::MmioRead`.
/// This function should be called after `Vcpu::run` returns a `VcpuExit::IoIn`,
/// `VcpuExit::MmioRead`, or `VcpuExit::HypervHcall`.
#[allow(clippy::cast_ptr_alignment)]
pub fn set_data(&self, data: &[u8]) -> Result<()> {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
@ -1288,6 +1298,20 @@ impl Vcpu {
mmio.data[..len].copy_from_slice(data);
Ok(())
}
KVM_EXIT_HYPERV => {
    // Safe because the exit_reason (which comes from the kernel) told us which
    // union field to use.
    let hyperv = unsafe { &mut run.__bindgen_anon_1.hyperv };
    // Only hypercall exits accept a result value; a synic exit has nothing
    // for userspace to write back.
    if hyperv.type_ != KVM_EXIT_HYPERV_HCALL {
        return Err(Error::new(EINVAL));
    }
    if data.len() != std::mem::size_of::<u64>() {
        return Err(Error::new(EINVAL));
    }
    // Safe because we verified above that this exit carries the hcall member.
    let hcall = unsafe { &mut hyperv.u.hcall };
    // Store the caller-provided result where KVM will pick it up on the next
    // KVM_RUN. Note the previous code did
    // `hcall.result.to_ne_bytes().copy_from_slice(data)`, which copied the
    // data into a temporary array and discarded it, leaving `result`
    // unchanged.
    let mut result = [0u8; std::mem::size_of::<u64>()];
    result.copy_from_slice(data);
    hcall.result = u64::from_ne_bytes(result);
    Ok(())
}
_ => Err(Error::new(EINVAL)),
}
}
@ -1840,6 +1864,30 @@ impl RunnableVcpu {
let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
Ok(VcpuExit::IoapicEoi { vector })
}
KVM_EXIT_HYPERV => {
    // Safe because the exit_reason (which comes from the kernel) told us which
    // union field to use.
    let hyperv = unsafe { &run.__bindgen_anon_1.hyperv };
    // `type_` selects which member of the `u` union the kernel filled in.
    match hyperv.type_ as u32 {
        KVM_EXIT_HYPERV_SYNIC => {
            // Safe because `type_` indicates the `synic` member is valid.
            let synic = unsafe { &hyperv.u.synic };
            Ok(VcpuExit::HypervSynic {
                msr: synic.msr,
                control: synic.control,
                evt_page: synic.evt_page,
                msg_page: synic.msg_page,
            })
        }
        KVM_EXIT_HYPERV_HCALL => {
            // Safe because `type_` indicates the `hcall` member is valid.
            let hcall = unsafe { &hyperv.u.hcall };
            // The hypercall result is supplied later via `set_data`, not
            // surfaced in the exit itself.
            Ok(VcpuExit::HypervHcall {
                input: hcall.input,
                params: hcall.params,
            })
        }
        // Any other subtype is unexpected from the kernel.
        _ => Err(Error::new(EINVAL)),
    }
}
KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),

View file

@ -274,11 +274,14 @@ pub const KVM_EXIT_EPR: ::std::os::raw::c_uint = 23;
pub const KVM_EXIT_SYSTEM_EVENT: ::std::os::raw::c_uint = 24;
pub const KVM_EXIT_S390_STSI: ::std::os::raw::c_uint = 25;
pub const KVM_EXIT_IOAPIC_EOI: ::std::os::raw::c_uint = 26;
pub const KVM_EXIT_HYPERV: ::std::os::raw::c_uint = 27;
pub const KVM_INTERNAL_ERROR_EMULATION: ::std::os::raw::c_uint = 1;
pub const KVM_INTERNAL_ERROR_SIMUL_EX: ::std::os::raw::c_uint = 2;
pub const KVM_INTERNAL_ERROR_DELIVERY_EV: ::std::os::raw::c_uint = 3;
pub const KVM_EXIT_IO_IN: ::std::os::raw::c_uint = 0;
pub const KVM_EXIT_IO_OUT: ::std::os::raw::c_uint = 1;
pub const KVM_EXIT_HYPERV_SYNIC: ::std::os::raw::c_uint = 1;
pub const KVM_EXIT_HYPERV_HCALL: ::std::os::raw::c_uint = 2;
pub const KVM_S390_RESET_POR: ::std::os::raw::c_uint = 1;
pub const KVM_S390_RESET_CLEAR: ::std::os::raw::c_uint = 2;
pub const KVM_S390_RESET_SUBSYSTEM: ::std::os::raw::c_uint = 4;
@ -1694,6 +1697,7 @@ pub union kvm_run__bindgen_ty_1 {
pub system_event: kvm_run__bindgen_ty_1__bindgen_ty_17,
pub s390_stsi: kvm_run__bindgen_ty_1__bindgen_ty_18,
pub eoi: kvm_run__bindgen_ty_1__bindgen_ty_19,
pub hyperv: kvm_hyperv_exit,
pub padding: [::std::os::raw::c_char; 256usize],
_bindgen_union_align: [u64; 32usize],
}
@ -2833,6 +2837,174 @@ fn bindgen_test_layout_kvm_run__bindgen_ty_1__bindgen_ty_19() {
)
);
}
/// Userspace view of the kernel's `struct kvm_hyperv_exit`, filled in by KVM
/// inside `kvm_run` when `exit_reason` is `KVM_EXIT_HYPERV`.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct kvm_hyperv_exit {
    /// Which union member of `u` is valid: `KVM_EXIT_HYPERV_SYNIC` or
    /// `KVM_EXIT_HYPERV_HCALL`.
    pub type_: __u32,
    /// Explicit padding so `u` sits at offset 8 (see the layout test).
    pub pad: __u32,
    pub u: kvm_hyperv_exit__bindgen_ty_1,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit>(),
40usize,
concat!("Size of: ", stringify!(kvm_hyperv_exit))
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit>(),
8usize,
concat!("Alignment of ", stringify!(kvm_hyperv_exit))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<kvm_hyperv_exit>())).u as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit),
"::",
stringify!(u)
)
);
}
/// Payload of a `KVM_EXIT_HYPERV` exit; interpret according to
/// `kvm_hyperv_exit::type_`.
#[repr(C)]
#[derive(Copy, Clone)]
pub union kvm_hyperv_exit__bindgen_ty_1 {
    /// Valid when `type_` is `KVM_EXIT_HYPERV_SYNIC`.
    pub synic: kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1,
    /// Valid when `type_` is `KVM_EXIT_HYPERV_HCALL`.
    pub hcall: kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1>(),
32usize,
concat!("Size of: ", stringify!(kvm_hyperv_exit__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1>(),
8usize,
concat!("Alignment of ", stringify!(kvm_hyperv_exit__bindgen_ty_1))
);
}
/// Synic (Hyper-V synthetic interrupt controller) state change reported by
/// KVM.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1 {
    /// The synic MSR whose write triggered this exit.
    pub msr: __u32,
    /// Padding so the u64 fields below are 8-byte aligned.
    pub pad: __u32,
    pub control: __u64,
    pub evt_page: __u64,
    pub msg_page: __u64,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>(),
32usize,
concat!(
"Size of: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1)
)
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>(),
8usize,
concat!(
"Alignment of ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).control
as *const _ as usize
},
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(control)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).evt_page
as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(evt_page)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).msg_page
as *const _ as usize
},
24usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(msg_page)
)
);
}
/// Hyper-V hypercall reported by KVM. Userspace fills in `result` before the
/// next KVM_RUN.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2 {
    pub input: __u64,
    pub result: __u64,
    pub params: [__u64; 2],
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>(),
32usize,
concat!(
"Size of: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2)
)
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>(),
8usize,
concat!(
"Alignment of ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>())).result
as *const _ as usize
},
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2),
"::",
stringify!(result)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>())).params
as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2),
"::",
stringify!(params)
)
);
}
#[test]
fn bindgen_test_layout_kvm_run__bindgen_ty_1() {
assert_eq!(

View file

@ -8,9 +8,6 @@
use sys_util::{ioctl_io_nr, ioctl_ior_nr, ioctl_iow_nr, ioctl_iowr_nr};
// Somehow this one gets missed by bindgen
pub const KVM_EXIT_IO_OUT: ::std::os::raw::c_uint = 1;
// Each of the below modules defines ioctls specific to their platform.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]

View file

@ -251,11 +251,14 @@ pub const KVM_EXIT_EPR: ::std::os::raw::c_uint = 23;
pub const KVM_EXIT_SYSTEM_EVENT: ::std::os::raw::c_uint = 24;
pub const KVM_EXIT_S390_STSI: ::std::os::raw::c_uint = 25;
pub const KVM_EXIT_IOAPIC_EOI: ::std::os::raw::c_uint = 26;
pub const KVM_EXIT_HYPERV: ::std::os::raw::c_uint = 27;
pub const KVM_INTERNAL_ERROR_EMULATION: ::std::os::raw::c_uint = 1;
pub const KVM_INTERNAL_ERROR_SIMUL_EX: ::std::os::raw::c_uint = 2;
pub const KVM_INTERNAL_ERROR_DELIVERY_EV: ::std::os::raw::c_uint = 3;
pub const KVM_EXIT_IO_IN: ::std::os::raw::c_uint = 0;
pub const KVM_EXIT_IO_OUT: ::std::os::raw::c_uint = 1;
pub const KVM_EXIT_HYPERV_SYNIC: ::std::os::raw::c_uint = 1;
pub const KVM_EXIT_HYPERV_HCALL: ::std::os::raw::c_uint = 2;
pub const KVM_S390_RESET_POR: ::std::os::raw::c_uint = 1;
pub const KVM_S390_RESET_CLEAR: ::std::os::raw::c_uint = 2;
pub const KVM_S390_RESET_SUBSYSTEM: ::std::os::raw::c_uint = 4;
@ -4084,6 +4087,7 @@ pub union kvm_run__bindgen_ty_1 {
pub system_event: kvm_run__bindgen_ty_1__bindgen_ty_17,
pub s390_stsi: kvm_run__bindgen_ty_1__bindgen_ty_18,
pub eoi: kvm_run__bindgen_ty_1__bindgen_ty_19,
pub hyperv: kvm_hyperv_exit,
pub padding: [::std::os::raw::c_char; 256usize],
_bindgen_union_align: [u64; 32usize],
}
@ -5223,7 +5227,174 @@ fn bindgen_test_layout_kvm_run__bindgen_ty_1__bindgen_ty_19() {
)
);
}
/// Userspace view of the kernel's `struct kvm_hyperv_exit`, filled in by KVM
/// inside `kvm_run` when `exit_reason` is `KVM_EXIT_HYPERV`.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct kvm_hyperv_exit {
    /// Which union member of `u` is valid: `KVM_EXIT_HYPERV_SYNIC` or
    /// `KVM_EXIT_HYPERV_HCALL`.
    pub type_: __u32,
    /// Explicit padding so `u` sits at offset 8 (see the layout test).
    pub pad: __u32,
    pub u: kvm_hyperv_exit__bindgen_ty_1,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit>(),
40usize,
concat!("Size of: ", stringify!(kvm_hyperv_exit))
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit>(),
8usize,
concat!("Alignment of ", stringify!(kvm_hyperv_exit))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<kvm_hyperv_exit>())).u as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit),
"::",
stringify!(u)
)
);
}
/// Payload of a `KVM_EXIT_HYPERV` exit; interpret according to
/// `kvm_hyperv_exit::type_`.
#[repr(C)]
#[derive(Copy, Clone)]
pub union kvm_hyperv_exit__bindgen_ty_1 {
    /// Valid when `type_` is `KVM_EXIT_HYPERV_SYNIC`.
    pub synic: kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1,
    /// Valid when `type_` is `KVM_EXIT_HYPERV_HCALL`.
    pub hcall: kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1>(),
32usize,
concat!("Size of: ", stringify!(kvm_hyperv_exit__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1>(),
8usize,
concat!("Alignment of ", stringify!(kvm_hyperv_exit__bindgen_ty_1))
);
}
/// Synic (Hyper-V synthetic interrupt controller) state change reported by
/// KVM.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1 {
    /// The synic MSR whose write triggered this exit.
    pub msr: __u32,
    /// Padding so the u64 fields below are 8-byte aligned.
    pub pad: __u32,
    pub control: __u64,
    pub evt_page: __u64,
    pub msg_page: __u64,
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>(),
32usize,
concat!(
"Size of: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1)
)
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>(),
8usize,
concat!(
"Alignment of ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).control
as *const _ as usize
},
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(control)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).evt_page
as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(evt_page)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1>())).msg_page
as *const _ as usize
},
24usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_1),
"::",
stringify!(msg_page)
)
);
}
/// Hyper-V hypercall reported by KVM. Userspace fills in `result` before the
/// next KVM_RUN.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2 {
    pub input: __u64,
    pub result: __u64,
    pub params: [__u64; 2],
}
#[test]
fn bindgen_test_layout_kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2() {
assert_eq!(
::std::mem::size_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>(),
32usize,
concat!(
"Size of: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2)
)
);
assert_eq!(
::std::mem::align_of::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>(),
8usize,
concat!(
"Alignment of ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>())).result
as *const _ as usize
},
8usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2),
"::",
stringify!(result)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2>())).params
as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(kvm_hyperv_exit__bindgen_ty_1__bindgen_ty_2),
"::",
stringify!(params)
)
);
}
#[test]
fn bindgen_test_layout_kvm_run__bindgen_ty_1() {
assert_eq!(

View file

@ -403,15 +403,30 @@ message VcpuResponse {
bytes debugregs = 8;
}
// This type of wait reason is only generated after a PuaseVcpus request on this VCPU.
// This type of wait reason is only generated after a PauseVcpus request on this VCPU.
message User {
uint64 user = 1;
}
// Wait reason for a Hyper-V hypercall exit; the plugin supplies the 8-byte
// hypercall result in its resume data.
message HypervCall {
    // Hypercall input value reported by the kernel.
    uint64 input = 1;
    // First hypercall parameter.
    uint64 params0 = 2;
    // Second hypercall parameter.
    uint64 params1 = 3;
}
// Wait reason for a Hyper-V synic state change; informational only, no
// result is expected from the plugin.
message HypervSynic {
    // The synic MSR involved in the change.
    uint32 msr = 1;
    uint64 control = 2;
    uint64 evt_page = 3;
    uint64 msg_page = 4;
}
oneof exit {
Init init = 1;
Io io = 2;
User user = 3;
HypervCall hyperv_call = 4;
HypervSynic hyperv_synic = 5;
}
}

View file

@ -517,6 +517,21 @@ pub fn run_vcpus(
&vcpu,
);
}
VcpuExit::HypervHcall { input, params } => {
let mut data = [0; 8];
vcpu_plugin.hyperv_call(input, params, &mut data, &vcpu);
// Setting data for a hyperv call cannot fail.
let _ = vcpu.set_data(&data);
}
VcpuExit::HypervSynic {
msr,
control,
evt_page,
msg_page,
} => {
vcpu_plugin
.hyperv_synic(msr, control, evt_page, msg_page, &vcpu);
}
VcpuExit::Hlt => break,
VcpuExit::Shutdown => break,
VcpuExit::InternalError => {

View file

@ -513,6 +513,54 @@ impl PluginVcpu {
self.process(IoSpace::Mmio, addr, VcpuRunData::Write(data), vcpu)
}
/// Has the plugin process handle a hyper-v call.
pub fn hyperv_call(&self, input: u64, params: [u64; 2], data: &mut [u8], vcpu: &Vcpu) -> bool {
let mut wait_reason = VcpuResponse_Wait::new();
let hv = wait_reason.mut_hyperv_call();
hv.input = input;
hv.params0 = params[0];
hv.params1 = params[1];
self.wait_reason.set(Some(wait_reason));
match self.handle_until_resume(vcpu) {
Ok(resume_data) => {
data.copy_from_slice(&resume_data);
true
}
Err(e) if e.errno() == EPIPE => false,
Err(e) => {
error!("failed to process hyperv call request: {}", e);
false
}
}
}
/// Has the plugin process handle a synic config change.
///
/// Publishes the synic state change as this VCPU's wait reason and blocks
/// until the plugin resumes it. Returns false if the plugin has exited or an
/// error occurred while waiting.
pub fn hyperv_synic(
    &self,
    msr: u32,
    control: u64,
    evt_page: u64,
    msg_page: u64,
    vcpu: &Vcpu,
) -> bool {
    // Package the synic change up as the wait reason the plugin will observe
    // on its next wait call.
    let mut wait_reason = VcpuResponse_Wait::new();
    {
        let synic = wait_reason.mut_hyperv_synic();
        synic.msr = msr;
        synic.control = control;
        synic.evt_page = evt_page;
        synic.msg_page = msg_page;
    }
    self.wait_reason.set(Some(wait_reason));
    // A synic change carries no result value, so any resume payload is
    // ignored.
    match self.handle_until_resume(vcpu) {
        Ok(_) => true,
        Err(e) => {
            // EPIPE just means the plugin process went away; stay quiet.
            if e.errno() != EPIPE {
                error!("failed to process hyperv synic request: {}", e);
            }
            false
        }
    }
}
fn handle_request(&self, vcpu: &Vcpu) -> SysResult<Option<Vec<u8>>> {
let mut wait_reason = self.wait_reason.take();
let mut do_recv = true;

View file

@ -5,14 +5,159 @@
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kvm.h>
#include <linux/memfd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "crosvm.h"
#define KILL_ADDRESS 0x3f9
#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002
#endif
// Guest machine code, loaded at guest physical 0x1000 and entered with
// rip=0x1000 in real mode (hence the 0x66 operand-size prefixes on the 32-bit
// mov instructions). Each wrmsr to a Hyper-V MSR produces a synic exit for
// the host to verify; the final OUT to KILL_ADDRESS asks the plugin to stop.
const uint8_t code[] = {
    // Set a non-zero value for HV_X64_MSR_GUEST_OS_ID
    // to enable hypercalls.
    // mov edx, 0xffffffff
    0x66, 0xba, 0xff, 0xff, 0xff, 0xff,
    // mov eax, 0xffffffff
    0x66, 0xb8, 0xff, 0xff, 0xff, 0xff,
    // mov ecx, 0x40000000 # HV_X64_MSR_GUEST_OS_ID
    0x66, 0xb9, 0x00, 0x00, 0x00, 0x40,
    // wrmsr
    0x0f, 0x30,

    // Establish page at 0x2000 as the hypercall page.
    // mov edx, 0x00000000
    0x66, 0xba, 0x00, 0x00, 0x00, 0x00,
    // mov eax, 0x00002001 # lowest bit is enable bit
    0x66, 0xb8, 0x01, 0x20, 0x00, 0x00,
    // mov ecx, 0x40000001 # HV_X64_MSR_HYPERCALL
    0x66, 0xb9, 0x01, 0x00, 0x00, 0x40,
    // wrmsr
    0x0f, 0x30,

    // We can't test generic hypercalls since they're
    // defined to UD for processors running in real mode.

    // for HV_X64_MSR_CONTROL:
    // edx:eax gets transferred as 'control'
    // mov edx, 0x05060708
    0x66, 0xba, 0x08, 0x07, 0x06, 0x05,
    // mov eax, 0x01020304
    0x66, 0xb8, 0x04, 0x03, 0x02, 0x01,
    // mov ecx, 0x40000080 # HV_X64_MSR_SCONTROL
    0x66, 0xb9, 0x80, 0x00, 0x00, 0x40,
    // wrmsr
    0x0f, 0x30,

    // Establish page at 0x3000 as the evt_page.
    // mov edx, 0x00000000
    0x66, 0xba, 0x00, 0x00, 0x00, 0x00,
    // mov eax, 0x00003000
    0x66, 0xb8, 0x00, 0x30, 0x00, 0x00,
    // mov ecx, 0x40000082 # HV_X64_MSR_SIEFP
    0x66, 0xb9, 0x82, 0x00, 0x00, 0x40,
    // wrmsr
    0x0f, 0x30,

    // Establish page at 0x4000 as the 'msg_page'.
    // mov edx, 0x00000000
    0x66, 0xba, 0x00, 0x00, 0x00, 0x00,
    // mov eax, 0x00004000
    0x66, 0xb8, 0x00, 0x40, 0x00, 0x00,
    // mov ecx, 0x40000083 # HV_X64_MSR_SIMP
    0x66, 0xb9, 0x83, 0x00, 0x00, 0x40,
    // wrmsr
    0x0f, 0x30,

    // Request a kill.
    // mov dx, 0x3f9
    0xba, 0xf9, 0x03,
    // mov al, 0x1
    0xb0, 0x01,
    // out dx, al
    0xee,

    // hlt
    0xf4
};
// Verifies that |evt| is the expected synic exit, then resumes the vcpu and
// waits for the next event (stored back into |evt|).
//
// Returns 0 on success; on any mismatch or API failure, logs a message to
// stderr tagged with |phase| and returns 1.
int check_synic_access(struct crosvm_vcpu* vcpu, struct crosvm_vcpu_event *evt,
                       uint32_t msr, uint64_t control, uint64_t evt_page,
                       uint64_t msg_page, const char *phase) {
  if (evt->kind != CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC) {
    fprintf(stderr, "Got incorrect exit type before %s: %d\n", phase,
            evt->kind);
    return 1;
  }
  if (evt->hyperv_synic.msr != msr ||
      evt->hyperv_synic._reserved != 0 ||
      evt->hyperv_synic.control != control ||
      evt->hyperv_synic.evt_page != evt_page ||
      evt->hyperv_synic.msg_page != msg_page) {
    // Each pair below prints expected vs actual. PRIx64 is used instead of
    // %lx so the conversions are correct for uint64_t regardless of whether
    // the platform defines it as long or long long.
    fprintf(stderr, "Got unexpected synic message after %s: "
            "0x%x vs 0x%x, 0x%" PRIx64 " vs 0x%" PRIx64 ", "
            "0x%" PRIx64 " vs 0x%" PRIx64 ", "
            "0x%" PRIx64 " vs 0x%" PRIx64 "\n",
            phase, msr, evt->hyperv_synic.msr,
            control, evt->hyperv_synic.control,
            evt_page, evt->hyperv_synic.evt_page,
            msg_page, evt->hyperv_synic.msg_page);
    return 1;
  }
  if (crosvm_vcpu_resume(vcpu) != 0) {
    fprintf(stderr, "Failed to resume after %s\n", phase);
    return 1;
  }
  if (crosvm_vcpu_wait(vcpu, evt) != 0) {
    fprintf(stderr, "Failed to wait after %s\n", phase);
    return 1;
  }
  return 0;
}
int main(int argc, char** argv) {
struct crosvm* crosvm = NULL;
uint64_t cap_args[4] = {0};
@ -23,6 +168,53 @@ int main(int argc, char** argv) {
return 1;
}
ret = crosvm_reserve_range(crosvm, CROSVM_ADDRESS_SPACE_IOPORT,
KILL_ADDRESS, 1);
if (ret) {
fprintf(stderr, "failed to reserve kill port: %d\n", ret);
return 1;
}
// VM mem layout:
// null page, code page, hypercall page, synic evt_page, synic msg_page
int mem_size = 0x4000;
int mem_fd = syscall(SYS_memfd_create, "guest_mem",
MFD_CLOEXEC | MFD_ALLOW_SEALING);
if (mem_fd < 0) {
fprintf(stderr, "failed to create guest memfd: %d\n", errno);
return 1;
}
ret = ftruncate(mem_fd, mem_size);
if (ret) {
fprintf(stderr, "failed to set size of guest memory: %d\n", errno);
return 1;
}
uint8_t *mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED,
mem_fd, 0x0);
if (mem == MAP_FAILED) {
fprintf(stderr, "failed to mmap guest memory: %d\n", errno);
return 1;
}
fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK);
memcpy(mem, code, sizeof(code));
// Before MSR verify hypercall page is zero
int i;
for (i = 0; i < 5; ++i) {
if (mem[0x1000 + i]) {
fprintf(stderr, "Hypercall page isn't zero\n");
return 1;
}
}
struct crosvm_memory *mem_obj;
ret = crosvm_create_memory(crosvm, mem_fd, 0x0, mem_size, 0x1000,
false, false, &mem_obj);
if (ret) {
fprintf(stderr, "failed to create memory in crosvm: %d\n", ret);
return 1;
}
struct crosvm_vcpu* vcpu = NULL;
ret = crosvm_get_vcpu(crosvm, 0, &vcpu);
if (ret) {
@ -61,5 +253,71 @@ int main(int argc, char** argv) {
return 1;
}
{
struct kvm_sregs sregs = {0};
crosvm_vcpu_get_sregs(vcpu, &sregs);
sregs.cs.base = 0;
sregs.cs.selector = 0;
sregs.es.base = 0;
sregs.es.selector = 0;
crosvm_vcpu_set_sregs(vcpu, &sregs);
struct kvm_regs regs = {0};
crosvm_vcpu_get_regs(vcpu, &regs);
regs.rip = 0x1000;
regs.rflags = 2;
crosvm_vcpu_set_regs(vcpu, &regs);
}
if (crosvm_vcpu_resume(vcpu) != 0) {
fprintf(stderr, "Failed to resume after init\n");
return 1;
}
if (crosvm_vcpu_wait(vcpu, &evt) != 0) {
fprintf(stderr, "Failed to wait after init\n");
return 1;
}
if (check_synic_access(vcpu, &evt, 0x40000080, 0x506070801020304, 0, 0,
"synic msg #1")) {
return 1;
}
// After first MSR verify hypercall page is non-zero
uint8_t value = 0;
for (i = 0; i < 5; ++i) {
value |= mem[0x1000+i];
}
if (value == 0) {
fprintf(stderr, "Hypercall page is still zero\n");
return 1;
}
if (check_synic_access(vcpu, &evt, 0x40000082, 0x506070801020304, 0x3000,
0, "synic msg #2")) {
return 1;
}
if (check_synic_access(vcpu, &evt, 0x40000083, 0x506070801020304, 0x3000,
0x4000, "synic msg #3")) {
return 1;
}
if (evt.kind != CROSVM_VCPU_EVENT_KIND_IO_ACCESS) {
fprintf(stderr, "Got incorrect exit type after synic #3: %d\n",
evt.kind);
return 1;
}
if (evt.io_access.address_space != CROSVM_ADDRESS_SPACE_IOPORT ||
evt.io_access.address != KILL_ADDRESS ||
!evt.io_access.is_write ||
evt.io_access.length != 1 ||
evt.io_access.data[0] != 1) {
fprintf(stderr, "Didn't see kill request from VM\n");
return 1;
}
fprintf(stderr, "Saw kill request from VM, exiting\n");
return 0;
}