Revert "vm-memory: mlock2(MLOCK_ONFAULT) guest memory for protected VMs"

This reverts commit b975546c3f.

Reason for revert: mlock is insufficient to prevent migration/compaction of guest memory, and therefore pKVM has been modified to perform pinning in the kernel, making the mlock call superfluous.

Original change's description:
> vm-memory: mlock2(MLOCK_ONFAULT) guest memory for protected VMs
>
> By default, the memory of a protected VM is inaccessible to the host
> and crosvm. Consequently, attempts to access guest memory are fatal and
> must be avoided in order for the guest to run.
>
> Mlock guest pages as they are faulted in for protected VMs, ensuring
> that the host doesn't try to age or swap them out as a result of memory
> pressure.
>
> Bug: b:204298056
> Test: cargo test on x86 and arm64
> Cc: Quentin Perret <qperret@google.com>
> Cc: Andrew Walbran <qwandor@google.com>
> Signed-off-by: Will Deacon <willdeacon@google.com>
> Change-Id: I618ec1e8b1136a47a8b3ef563e45bc41d75ab517
> Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3257689
> Tested-by: kokoro <noreply+kokoro@google.com>
> Reviewed-by: Chirantan Ekbote <chirantan@chromium.org>

Bug: b:204298056
Change-Id: Ibdcc579805c47adf35412b732829c074ce038471
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3310884
Tested-by: kokoro <noreply+kokoro@google.com>
Commit-Queue: Quentin Perret <qperret@google.com>
Auto-Submit: Quentin Perret <qperret@google.com>
Reviewed-by: Will Deacon <willdeacon@google.com>
Reviewed-by: Andrew Walbran <qwandor@google.com>
Reviewed-by: Chirantan Ekbote <chirantan@chromium.org>
This commit is contained in:
Quentin Perret 2021-12-02 09:48:43 +00:00 committed by Commit Bot
parent 88b671a469
commit 2620380106
5 changed files with 6 additions and 49 deletions

View file

@@ -42,10 +42,6 @@ impl MemoryMapping {
self.mapping.use_hugepages()
}
pub fn mlock_on_fault(&self) -> Result<()> {
self.mapping.mlock_on_fault()
}
pub fn read_to_memory(
&self,
mem_offset: usize,

View file

@@ -19,8 +19,6 @@ use data_model::DataInit;
use crate::{errno, pagesize};
const MLOCK_ONFAULT: libc::c_int = 1;
#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
@@ -413,23 +411,6 @@ impl MemoryMapping {
}
}
/// Mlock the guest pages as they are faulted in
pub fn mlock_on_fault(&self) -> Result<()> {
let ret = unsafe {
// TODO: Switch to libc::mlock2 once https://github.com/rust-lang/libc/pull/2525 lands
libc::syscall(
libc::SYS_mlock2,
self.as_ptr() as *mut libc::c_void,
self.size(),
MLOCK_ONFAULT,
)
};
if ret == -1 {
return Err(Error::SystemCallFailed(errno::Error::last()));
}
Ok(())
}
/// Calls msync with MS_SYNC on the mapping.
pub fn msync(&self) -> Result<()> {
// This is safe since we use the exact address and length of a known

View file

@@ -2450,12 +2450,7 @@ pub fn run_config(cfg: Config) -> Result<()> {
if components.hugepages {
mem_policy |= MemoryPolicy::USE_HUGEPAGES;
}
if components.protected_vm == ProtectionType::Protected {
mem_policy |= MemoryPolicy::MLOCK_ON_FAULT;
}
guest_mem
.set_memory_policy(mem_policy)
.context("failed to set guest memory policy")?;
guest_mem.set_memory_policy(mem_policy);
let kvm = Kvm::new_with_path(&cfg.kvm_device_path).context("failed to create kvm")?;
let vm = KvmVm::new(&kvm, guest_mem).context("failed to create vm")?;
let vm_clone = vm.try_clone().context("failed to clone vm")?;

View file

@@ -35,7 +35,7 @@ use base::{
use kvm::{Cap, Datamatch, IoeventAddress, Kvm, Vcpu, VcpuExit, Vm};
use minijail::{self, Minijail};
use net_util::{Error as TapError, Tap, TapT};
use vm_memory::{GuestMemory, GuestMemoryError, MemoryPolicy};
use vm_memory::{GuestMemory, MemoryPolicy};
use self::process::*;
use self::vcpu::*;
@@ -138,8 +138,6 @@ pub enum Error {
RootNotDir,
#[error("failed to set gidmap for jail: {0}")]
SetGidMap(minijail::Error),
#[error("failed to set guest memory policy: {0}")]
SetMemoryPolicy(GuestMemoryError),
#[error("failed to set uidmap for jail: {0}")]
SetUidMap(minijail::Error),
#[error("process {pid} died with signal {signo}, status {status}, and code {code}")]
@@ -697,8 +695,7 @@ pub fn run_config(cfg: Config) -> Result<()> {
if cfg.hugepages {
mem_policy |= MemoryPolicy::USE_HUGEPAGES;
}
mem.set_memory_policy(mem_policy)
.map_err(Error::SetMemoryPolicy)?;
mem.set_memory_policy(mem_policy);
let kvm = Kvm::new_with_path(&cfg.kvm_device_path).map_err(Error::CreateKvm)?;
let mut vm = Vm::new(&kvm, mem).map_err(Error::CreateVm)?;
vm.create_irq_chip().map_err(Error::CreateIrqChip)?;

View file

@@ -41,8 +41,6 @@ pub enum Error {
MemoryAddSealsFailed(SysError),
#[error("failed to create shm region")]
MemoryCreationFailed(SysError),
#[error("failed to lock {0} bytes of guest memory: {1}")]
MemoryLockingFailed(usize, MmapError),
#[error("failed to map guest memory: {0}")]
MemoryMappingFailed(MmapError),
#[error("shm regions must be page aligned")]
@@ -66,7 +64,6 @@ pub type Result<T> = result::Result<T, Error>;
bitflags! {
pub struct MemoryPolicy: u32 {
const USE_HUGEPAGES = 1;
const MLOCK_ON_FAULT = 2;
}
}
@@ -344,25 +341,16 @@ impl GuestMemory {
}
/// Handles guest memory policy hints/advices.
pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) -> Result<()> {
for (_, region) in self.regions.iter().enumerate() {
if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
for (_, region) in self.regions.iter().enumerate() {
let ret = region.mapping.use_hugepages();
if let Err(err) = ret {
println!("Failed to enable HUGEPAGE for mapping {}", err);
}
}
if mem_policy.contains(MemoryPolicy::MLOCK_ON_FAULT) {
region
.mapping
.mlock_on_fault()
.map_err(|e| Error::MemoryLockingFailed(region.mapping.size(), e))?;
}
}
Ok(())
}
/// Perform the specified action on each region's addresses.