vm_control: Add MmapAndRegisterMemory request

Add a vm_control::MmapAndRegisterMemory request that asks the main process
to mmap() the given FDs into the given shm and register the resulting
region with the guest.

This request will be used by the pmem-ext2 device in the next CL.
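
A minimal usage sketch of how a device backend might drive the new request
through VmMemoryClient; the guest address, mapping length, and offsets below
are placeholders rather than values from this change:

  // Hypothetical caller: `client` is a VmMemoryClient connected to the main
  // process, `shm` is a SharedMemory the files should be mapped into, and
  // `file` is an already-opened backing file.
  let mappings = vec![VmMemoryFileMapping {
      file,
      length: 4096,  // bytes to map from `file`
      mem_offset: 0, // offset of the mapping within `shm`
      file_offset: 0,
  }];
  // Sends MmapAndRegisterMemory, then the FDs in SCM_MAX_FD-sized chunks,
  // and returns the memory slot of the registered region on success.
  let slot = client.mmap_and_register_memory(
      GuestAddress(0x1_0000_0000), // placeholder guest physical address
      shm,
      mappings,
  )?;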

BUG=b:329359333
TEST=presubmit

Change-Id: I365fc582f832401ada0556b28beda078d687a53f
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5872163
Reviewed-by: Takaya Saeki <takayas@chromium.org>
Commit-Queue: Keiichi Watanabe <keiichiw@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>


@@ -2308,9 +2308,9 @@ fn start_pci_root_worker(
})
.context("failed to send request")?;
match self.vm_control_tube.recv::<VmMemoryResponse>() {
Ok(VmMemoryResponse::RegisterMemory(slot)) => {
Ok(VmMemoryResponse::RegisterMemory { region_id, .. }) => {
let cur_id = self.next_id;
self.registered_regions.insert(cur_id, slot);
self.registered_regions.insert(cur_id, region_id);
self.next_id += 1;
Ok(cur_id)
}
@@ -4443,6 +4443,7 @@ fn vm_memory_handler_thread(
match tube.recv::<VmMemoryRequest>() {
Ok(request) => {
let response = request.execute(
tube,
&mut vm,
&mut sys_allocator_mutex.lock(),
&mut gralloc,


@@ -35,6 +35,8 @@ pub enum ApiClientError {
RequestFailed(#[from] base::Error),
#[error("API client tube send failed: {0}")]
Send(TubeError),
#[error("API client tube sending FDs failed: {0}")]
SendFds(TubeError),
#[error("Unexpected tube response")]
UnexpectedResponse,
}
@@ -87,11 +89,43 @@ impl VmMemoryClient {
};
match self.request(&request)? {
VmMemoryResponse::Err(e) => Err(ApiClientError::RequestFailed(e)),
VmMemoryResponse::RegisterMemory(region_id) => Ok(region_id),
VmMemoryResponse::RegisterMemory { region_id, .. } => Ok(region_id),
_other => Err(ApiClientError::UnexpectedResponse),
}
}
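/// Ask the main process to mmap the given files into `shm` and register the resulting
/// region as read-only guest memory at `mapping_address`, returning its memory slot.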
#[cfg(any(target_os = "android", target_os = "linux"))]
pub fn mmap_and_register_memory(
&self,
mapping_address: GuestAddress,
shm: base::SharedMemory,
file_mapping_info: Vec<crate::VmMemoryFileMapping>,
) -> Result<u32> {
let num_file_mappings = file_mapping_info.len();
let req = VmMemoryRequest::MmapAndRegisterMemory {
shm,
dest: VmMemoryDestination::GuestPhysicalAddress(mapping_address.0),
num_file_mappings,
};
self.tube.send(&req).map_err(ApiClientError::Send)?;
// Since the number of FDs that can be sent via Tube at once is limited to
// SCM_MAX_FD, split `file_mapping_info` into chunks and send them one
// chunk at a time.
for m in file_mapping_info.chunks(base::unix::SCM_MAX_FD) {
self.tube
.send_with_max_fds(&m, m.len())
.map_err(ApiClientError::SendFds)?;
}
match self.tube.recv().map_err(ApiClientError::Recv)? {
VmMemoryResponse::RegisterMemory { slot, .. } => Ok(slot),
VmMemoryResponse::Err(e) => Err(ApiClientError::RequestFailed(e)),
_ => Err(ApiClientError::UnexpectedResponse),
}
}
/// Call hypervisor to free the given memory range.
pub fn dynamically_free_memory_range(
&self,


@@ -18,6 +18,10 @@ pub mod gpu;
#[cfg(any(target_os = "android", target_os = "linux"))]
use base::linux::MemoryMappingBuilderUnix;
#[cfg(any(target_os = "android", target_os = "linux"))]
use base::sys::call_with_extended_max_files;
#[cfg(any(target_os = "android", target_os = "linux"))]
use base::MemoryMappingArena;
#[cfg(windows)]
use base::MemoryMappingBuilderWindows;
use hypervisor::BalloonEvent;
@@ -548,11 +552,25 @@ pub struct IoEventUpdateRequest {
pub register: bool,
}
/// Request to mmap a file into a shared memory region.
/// This request is supposed to follow a `VmMemoryRequest::MmapAndRegisterMemory` request that
/// contains the `SharedMemory` that `file` is mmapped into.
#[cfg(any(target_os = "android", target_os = "linux"))]
#[derive(Serialize, Deserialize)]
pub struct VmMemoryFileMapping {
#[serde(with = "with_as_descriptor")]
pub file: File,
pub length: usize,
pub mem_offset: usize,
pub file_offset: u64,
}
#[derive(Serialize, Deserialize)]
pub enum VmMemoryRequest {
/// Prepare a shared memory region to make later operations more efficient. This
/// may be a no-op depending on underlying platform support.
PrepareSharedMemoryRegion { alloc: Alloc, cache: MemCacheType },
/// Register a memory to be mapped to the guest.
RegisterMemory {
/// Source of the memory to register (mapped file descriptor, shared memory region, etc.)
source: VmMemorySource,
@@ -563,6 +581,18 @@ pub enum VmMemoryRequest {
/// Cache attribute for guest memory setting
cache: MemCacheType,
},
#[cfg(any(target_os = "android", target_os = "linux"))]
/// Call mmap on `shm` and register the memory region as read-only guest memory.
/// This request is followed by an array of `VmMemoryFileMapping` with length
/// `num_file_mappings`.
MmapAndRegisterMemory {
/// Shared memory region that the files will be mmapped into and registered with the guest.
shm: SharedMemory,
/// Where to map the memory in the guest.
dest: VmMemoryDestination,
/// Length of the array of `VmMemoryFileMapping` that follows.
num_file_mappings: usize,
},
/// Call hypervisor to free the given memory range.
DynamicallyFreeMemoryRange {
guest_address: GuestAddress,
@@ -678,16 +708,20 @@ fn try_map_to_prepared_region(
}
let gfn = gfn + (dest_offset >> 12);
let memory_region_id = VmMemoryRegionId(gfn);
let region_id = VmMemoryRegionId(gfn);
region_state.registered_memory.insert(
memory_region_id,
region_id,
RegisteredMemory::FixedMapping {
slot: *slot,
offset: *dest_offset as usize,
size,
},
);
Some(VmMemoryResponse::RegisterMemory(memory_region_id))
Some(VmMemoryResponse::RegisterMemory {
region_id,
slot: *slot,
})
}
impl VmMemoryRequest {
@@ -702,6 +736,7 @@ impl VmMemoryRequest {
/// that received this `VmMemoryResponse`.
pub fn execute(
self,
#[cfg(any(target_os = "android", target_os = "linux"))] tube: &Tube,
vm: &mut impl Vm,
sys_allocator: &mut SystemAllocator,
gralloc: &mut RutabagaGralloc,
@@ -793,7 +828,104 @@ impl VmMemoryRequest {
region_state
.registered_memory
.insert(region_id, RegisteredMemory::DynamicMapping { slot });
VmMemoryResponse::RegisterMemory(region_id)
VmMemoryResponse::RegisterMemory { region_id, slot }
}
#[cfg(any(target_os = "android", target_os = "linux"))]
MmapAndRegisterMemory {
shm,
dest,
num_file_mappings,
} => {
// Define a callback to be executed with an extended limit on the number of
// open files. It receives `num_file_mappings` FDs and calls `add_fd_mapping`
// for each.
let callback = || {
let mem = match MemoryMappingBuilder::new(shm.size() as usize)
.from_shared_memory(&shm)
.build()
{
Ok(mem) => mem,
Err(e) => {
error!("Failed to build MemoryMapping from shared memory: {:#}", e);
return Err(VmMemoryResponse::Err(SysError::new(EINVAL)));
}
};
let mut mmap_arena = MemoryMappingArena::from(mem);
// If `num_file_mappings` exceeds `SCM_MAX_FD`, `file_mappings` are sent in
// chunks of length `SCM_MAX_FD`.
let mut file_mappings = Vec::with_capacity(num_file_mappings);
let mut read = 0;
while read < num_file_mappings {
let len = std::cmp::min(num_file_mappings - read, base::unix::SCM_MAX_FD);
let mps: Vec<VmMemoryFileMapping> = match tube.recv_with_max_fds(len) {
Ok(m) => m,
Err(e) => {
error!(
"Failed to get {num_file_mappings} FDs to be mapped: {:#}",
e
);
return Err(VmMemoryResponse::Err(SysError::new(EINVAL)));
}
};
file_mappings.extend(mps.into_iter());
read += len;
}
for VmMemoryFileMapping {
mem_offset,
length,
file,
file_offset,
} in file_mappings
{
if let Err(e) = mmap_arena.add_fd_mapping(
mem_offset,
length,
&file,
file_offset,
Protection::read(),
) {
error!("Failed to add fd mapping: {:#}", e);
return Err(VmMemoryResponse::Err(SysError::new(EINVAL)));
}
}
Ok(mmap_arena)
};
let mmap_arena = match call_with_extended_max_files(callback) {
Ok(Ok(m)) => m,
Ok(Err(e)) => {
return e;
}
Err(e) => {
error!("Failed to set max count of file descriptors: {e}");
return VmMemoryResponse::Err(e);
}
};
let size = shm.size();
let guest_addr = match dest.allocate(sys_allocator, size) {
Ok(addr) => addr,
Err(e) => return VmMemoryResponse::Err(e),
};
let slot = match vm.add_memory_region(
guest_addr,
Box::new(mmap_arena),
true,  // read_only
false, // log_dirty_pages
MemCacheType::CacheCoherent,
) {
Ok(slot) => slot,
Err(e) => return VmMemoryResponse::Err(e),
};
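// The region id is the guest frame number: guest physical address >> 12 (4 KiB pages).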
let region_id = VmMemoryRegionId(guest_addr.0 >> 12);
region_state
.registered_memory
.insert(region_id, RegisteredMemory::DynamicMapping { slot });
VmMemoryResponse::RegisterMemory { region_id, slot }
}
UnregisterMemory(id) => match region_state.registered_memory.remove(&id) {
Some(RegisteredMemory::DynamicMapping { slot }) => match vm
@@ -923,7 +1055,10 @@ pub struct VmMemoryRegionId(u64);
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryResponse {
/// The request to register memory into guest address space was successful.
RegisterMemory(VmMemoryRegionId),
RegisterMemory {
region_id: VmMemoryRegionId,
slot: u32,
},
Ok,
Err(SysError),
}