vm-memory: mlock2(MLOCK_ONFAULT) guest memory for protected VMs
By default, the memory of a protected VM is inaccessible to the host and
crosvm. Consequently, attempts to access guest memory are fatal and must be
avoided in order for the guest to run.

Mlock guest pages as they are faulted in for protected VMs, ensuring that the
host doesn't try to age or swap them out as a result of memory pressure.

Bug: b:204298056
Test: cargo test on x86 and arm64
Cc: Quentin Perret <qperret@google.com>
Cc: Andrew Walbran <qwandor@google.com>
Signed-off-by: Will Deacon <willdeacon@google.com>
Change-Id: I618ec1e8b1136a47a8b3ef563e45bc41d75ab517
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3257689
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Chirantan Ekbote <chirantan@chromium.org>
This commit is contained in:
parent 3230422d59
commit b975546c3f

5 changed files with 49 additions and 6 deletions
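For context, mlock2() with MLOCK_ONFAULT locks pages into RAM only as they are touched, so guest memory is pinned lazily instead of being pre-faulted the way plain mlock() would. A minimal standalone sketch of that call from Rust, assuming only the libc crate (the helper name and error handling are illustrative, not crosvm's API):

use std::io;

// Illustrative helper, not crosvm's API: lock `len` bytes starting at `addr` so
// that pages are pinned as they are faulted in, mirroring mlock2(MLOCK_ONFAULT).
fn mlock_on_fault_raw(addr: *const libc::c_void, len: usize) -> io::Result<()> {
    const MLOCK_ONFAULT: libc::c_int = 1;
    // SAFETY: the caller must pass the address and length of a mapping it owns.
    let ret = unsafe { libc::syscall(libc::SYS_mlock2, addr, len, MLOCK_ONFAULT) };
    if ret == -1 {
        return Err(io::Error::last_os_error());
    }
    Ok(())
}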
@@ -42,6 +42,10 @@ impl MemoryMapping {
         self.mapping.use_hugepages()
     }
 
+    pub fn mlock_on_fault(&self) -> Result<()> {
+        self.mapping.mlock_on_fault()
+    }
+
     pub fn read_to_memory(
         &self,
         mem_offset: usize,
@@ -19,6 +19,8 @@ use data_model::DataInit;
 
 use crate::{errno, pagesize};
 
+const MLOCK_ONFAULT: libc::c_int = 1;
+
 #[sorted]
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
@@ -415,6 +417,23 @@ impl MemoryMapping {
         }
     }
 
+    /// Mlock the guest pages as they are faulted in
+    pub fn mlock_on_fault(&self) -> Result<()> {
+        let ret = unsafe {
+            // TODO: Switch to libc::mlock2 once https://github.com/rust-lang/libc/pull/2525 lands
+            libc::syscall(
+                libc::SYS_mlock2,
+                self.as_ptr() as *mut libc::c_void,
+                self.size(),
+                MLOCK_ONFAULT,
+            )
+        };
+        if ret == -1 {
+            return Err(Error::SystemCallFailed(errno::Error::last()));
+        }
+        Ok(())
+    }
+
     /// Calls msync with MS_SYNC on the mapping.
     pub fn msync(&self) -> Result<()> {
         // This is safe since we use the exact address and length of a known
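The new MemoryMapping::mlock_on_fault above wraps the raw syscall because the libc crate did not yet expose mlock2 (see the TODO). As a hedged, self-contained illustration of the same pattern outside crosvm, the call can be exercised on a throwaway anonymous mapping; this assumes the libc crate and a non-zero RLIMIT_MEMLOCK:

// Hypothetical standalone exercise of the pattern added above (not part of the
// change): lock a throwaway anonymous mapping with mlock2(MLOCK_ONFAULT), then
// release it.
fn main() {
    let len = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;
    // SAFETY: requesting a fresh anonymous private mapping; the kernel picks the address.
    let addr = unsafe {
        libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        )
    };
    assert_ne!(addr, libc::MAP_FAILED);

    const MLOCK_ONFAULT: libc::c_int = 1;
    // SAFETY: addr/len describe the mapping created above.
    let ret = unsafe { libc::syscall(libc::SYS_mlock2, addr, len, MLOCK_ONFAULT) };
    assert_eq!(ret, 0, "mlock2 failed: {}", std::io::Error::last_os_error());

    // SAFETY: same mapping; unlock and unmap it.
    unsafe {
        libc::munlock(addr, len);
        libc::munmap(addr, len);
    }
}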
@@ -2419,7 +2419,12 @@ pub fn run_config(cfg: Config) -> Result<()> {
     if components.hugepages {
         mem_policy |= MemoryPolicy::USE_HUGEPAGES;
     }
-    guest_mem.set_memory_policy(mem_policy);
+    if components.protected_vm == ProtectionType::Protected {
+        mem_policy |= MemoryPolicy::MLOCK_ON_FAULT;
+    }
+    guest_mem
+        .set_memory_policy(mem_policy)
+        .context("failed to set guest memory policy")?;
     let kvm = Kvm::new_with_path(&cfg.kvm_device_path).context("failed to create kvm")?;
     let vm = KvmVm::new(&kvm, guest_mem).context("failed to create vm")?;
     let vm_clone = vm.try_clone().context("failed to clone vm")?;
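The call-site change above keeps hugepage selection as before, requests MLOCK_ON_FAULT only for protected VMs, and now propagates failure instead of ignoring it. A compact sketch of that pattern as a free function, assuming the vm_memory types from this change and anyhow for error context (the function name and parameters are illustrative):

use anyhow::Context;
use vm_memory::{GuestMemory, MemoryPolicy};

// Illustrative free function mirroring the call-site logic above.
fn apply_guest_memory_policy(
    guest_mem: &GuestMemory,
    hugepages: bool,
    protected_vm: bool,
) -> anyhow::Result<()> {
    let mut mem_policy = MemoryPolicy::empty();
    if hugepages {
        mem_policy |= MemoryPolicy::USE_HUGEPAGES;
    }
    if protected_vm {
        // Protected guests must not be swapped out from under the hypervisor.
        mem_policy |= MemoryPolicy::MLOCK_ON_FAULT;
    }
    guest_mem
        .set_memory_policy(mem_policy)
        .context("failed to set guest memory policy")
}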
@@ -35,7 +35,7 @@ use base::{
 use kvm::{Cap, Datamatch, IoeventAddress, Kvm, Vcpu, VcpuExit, Vm};
 use minijail::{self, Minijail};
 use net_util::{Error as TapError, Tap, TapT};
-use vm_memory::{GuestMemory, MemoryPolicy};
+use vm_memory::{GuestMemory, GuestMemoryError, MemoryPolicy};
 
 use self::process::*;
 use self::vcpu::*;
@@ -138,6 +138,8 @@ pub enum Error {
     RootNotDir,
     #[error("failed to set gidmap for jail: {0}")]
     SetGidMap(minijail::Error),
+    #[error("failed to set guest memory policy: {0}")]
+    SetMemoryPolicy(GuestMemoryError),
     #[error("failed to set uidmap for jail: {0}")]
     SetUidMap(minijail::Error),
     #[error("process {pid} died with signal {signo}, status {status}, and code {code}")]
@@ -695,7 +697,8 @@ pub fn run_config(cfg: Config) -> Result<()> {
     if cfg.hugepages {
         mem_policy |= MemoryPolicy::USE_HUGEPAGES;
     }
-    mem.set_memory_policy(mem_policy);
+    mem.set_memory_policy(mem_policy)
+        .map_err(Error::SetMemoryPolicy)?;
     let kvm = Kvm::new_with_path(&cfg.kvm_device_path).map_err(Error::CreateKvm)?;
     let mut vm = Vm::new(&kvm, mem).map_err(Error::CreateVm)?;
     vm.create_irq_chip().map_err(Error::CreateIrqChip)?;
@@ -40,6 +40,8 @@ pub enum Error {
     MemoryAddSealsFailed(SysError),
     #[error("failed to create shm region")]
     MemoryCreationFailed(SysError),
+    #[error("failed to lock {0} bytes of guest memory: {1}")]
+    MemoryLockingFailed(usize, MmapError),
     #[error("failed to map guest memory: {0}")]
     MemoryMappingFailed(MmapError),
     #[error("shm regions must be page aligned")]
@@ -63,6 +65,7 @@ pub type Result<T> = result::Result<T, Error>;
 bitflags! {
     pub struct MemoryPolicy: u32 {
         const USE_HUGEPAGES = 1;
+        const MLOCK_ON_FAULT = 2;
     }
 }
 
@@ -293,16 +296,25 @@ impl GuestMemory {
     }
 
     /// Handles guest memory policy hints/advices.
-    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
-        if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
-            for (_, region) in self.regions.iter().enumerate() {
+    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) -> Result<()> {
+        for (_, region) in self.regions.iter().enumerate() {
+            if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
                 let ret = region.mapping.use_hugepages();
 
                 if let Err(err) = ret {
                     println!("Failed to enable HUGEPAGE for mapping {}", err);
                 }
             }
+
+            if mem_policy.contains(MemoryPolicy::MLOCK_ON_FAULT) {
+                region
+                    .mapping
+                    .mlock_on_fault()
+                    .map_err(|e| Error::MemoryLockingFailed(region.mapping.size(), e))?;
+            }
         }
+
+        Ok(())
     }
 
     /// Perform the specified action on each region's addresses.
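One design note on the rewritten set_memory_policy: a hugepage madvise failure is still only logged, while an mlock failure is reported through the new MemoryLockingFailed variant and aborts VM setup. A hedged sketch of caller-side handling of that variant (the function and logging here are illustrative; only the types and variant come from this change):

use vm_memory::{GuestMemory, GuestMemoryError, MemoryPolicy};

// Illustrative caller matching on the new error variant of the fallible API.
fn lock_protected_guest_memory(guest_mem: &GuestMemory) {
    match guest_mem.set_memory_policy(MemoryPolicy::MLOCK_ON_FAULT) {
        Ok(()) => (),
        Err(GuestMemoryError::MemoryLockingFailed(bytes, e)) => {
            eprintln!("could not lock {} bytes of guest memory: {}", bytes, e)
        }
        Err(e) => eprintln!("failed to set guest memory policy: {}", e),
    }
}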