mirror of
https://chromium.googlesource.com/crosvm/crosvm
synced 2025-02-06 02:25:23 +00:00
devices: vvu: use CrOS memory size for BAR size
The size of the BAR limits the size of sibling VMs that can be used with the vvu proxy device. Since concierge creates VMs with memory size a little bit smaller than the amount of physical memory it sees, use the CrOS guest's memory size for the vvu proxy device bar size. Since what consumes memory is the mapping of sibling memory into the CrOS guest (not the BAR itself), it's not necessary to strictly limit BAR size. BUG=None TEST=vmc start termina Change-Id: I899e3f126b7ab32665aeabc05f51d3b121dec808 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3574423 Reviewed-by: Keiichi Watanabe <keiichiw@chromium.org> Tested-by: kokoro <noreply+kokoro@google.com> Commit-Queue: David Stevens <stevensd@chromium.org>
This commit is contained in:
parent
91296bdee0
commit
e123e39433
3 changed files with 29 additions and 15 deletions
|
@ -71,17 +71,6 @@ const CONFIG_UUID_SIZE: usize = 16;
|
|||
const VIRTIO_VHOST_USER_STATUS_SLAVE_UP: u8 = 0;
|
||||
|
||||
const BAR_INDEX: u8 = 2;
|
||||
// Bar size represents the amount of memory to be mapped for a sibling VM. Each
|
||||
// Virtio Vhost User Slave implementation requires access to the entire sibling
|
||||
// memory. It's assumed that sibling VM memory would be <= 8GB, hence this
|
||||
// constant value.
|
||||
//
|
||||
// TODO(abhishekbh): Understand why shared memory region size and overall bar
|
||||
// size differ in the QEMU implementation. The metadata required to map sibling
|
||||
// memory is about 16 MB per GB of sibling memory per device. Therefore, it is
|
||||
// in our interest to not waste space here and correlate it tightly to the
|
||||
// actual maximum memory a sibling VM can have.
|
||||
const BAR_SIZE: u64 = 1 << 33;
|
||||
|
||||
// Bar configuration.
|
||||
// All offsets are from the starting of bar `BAR_INDEX`.
|
||||
|
@ -92,8 +81,8 @@ const NOTIFICATIONS_OFFSET: u64 = DOORBELL_OFFSET + DOORBELL_SIZE;
|
|||
const NOTIFICATIONS_SIZE: u64 = 0x1000;
|
||||
const SHARED_MEMORY_OFFSET: u64 = NOTIFICATIONS_OFFSET + NOTIFICATIONS_SIZE;
|
||||
// TODO(abhishekbh): Copied from qemu with VVU support. This should be same as
|
||||
// `BAR_SIZE` but it's significantly lower than the memory allocated to a
|
||||
// sibling VM. Figure out how these two are related.
|
||||
// VirtioVhostUser.device_bar_size, but it's significantly lower than the
|
||||
// memory allocated to a sibling VM. Figure out how these two are related.
|
||||
const SHARED_MEMORY_SIZE: u64 = 0x1000;
|
||||
|
||||
// Notifications region related constants.
|
||||
|
@ -953,6 +942,14 @@ struct ActivateParams {
|
|||
pub struct VirtioVhostUser {
|
||||
base_features: u64,
|
||||
|
||||
// Represents the amount of memory to be mapped for a sibling VM. Each
|
||||
// Virtio Vhost User Slave implementation requires access to the entire sibling
|
||||
// memory.
|
||||
//
|
||||
// TODO(abhishekbh): Understand why shared memory region size and overall bar
|
||||
// size differ in the QEMU implementation.
|
||||
device_bar_size: u64,
|
||||
|
||||
// Bound socket waiting to accept a socket connection from the Vhost-user
|
||||
// sibling.
|
||||
listener: Option<UnixListener>,
|
||||
|
@ -995,9 +992,15 @@ impl VirtioVhostUser {
|
|||
main_process_tube: Tube,
|
||||
pci_address: Option<PciAddress>,
|
||||
uuid: Option<Uuid>,
|
||||
max_sibling_mem_size: u64,
|
||||
) -> Result<VirtioVhostUser> {
|
||||
let device_bar_size = max_sibling_mem_size
|
||||
.checked_next_power_of_two()
|
||||
.expect("Sibling too large");
|
||||
|
||||
Ok(VirtioVhostUser {
|
||||
base_features: base_features | 1 << VIRTIO_F_ACCESS_PLATFORM,
|
||||
device_bar_size,
|
||||
listener: Some(listener),
|
||||
config: VirtioVhostUserConfig {
|
||||
status: Le32::from(0),
|
||||
|
@ -1334,7 +1337,7 @@ impl VirtioDevice for VirtioVhostUser {
|
|||
|
||||
vec![PciBarConfiguration::new(
|
||||
BAR_INDEX as usize,
|
||||
BAR_SIZE as u64,
|
||||
self.device_bar_size,
|
||||
PciBarRegionType::Memory64BitRegion,
|
||||
// NotPrefetchable so as to exit on every read / write event in the
|
||||
// guest.
|
||||
|
|
|
@ -230,7 +230,12 @@ pub fn create_vhost_user_snd_device(cfg: &Config, option: &VhostUserOption) -> D
|
|||
})
|
||||
}
|
||||
|
||||
pub fn create_vvu_proxy_device(cfg: &Config, opt: &VvuOption, tube: Tube) -> DeviceResult {
|
||||
pub fn create_vvu_proxy_device(
|
||||
cfg: &Config,
|
||||
opt: &VvuOption,
|
||||
tube: Tube,
|
||||
max_sibling_mem_size: u64,
|
||||
) -> DeviceResult {
|
||||
let listener = UnixListener::bind(&opt.socket).map_err(|e| {
|
||||
error!("failed to bind listener for vvu proxy device: {}", e);
|
||||
e
|
||||
|
@ -242,6 +247,7 @@ pub fn create_vvu_proxy_device(cfg: &Config, opt: &VvuOption, tube: Tube) -> Dev
|
|||
tube,
|
||||
opt.addr,
|
||||
opt.uuid,
|
||||
max_sibling_mem_size,
|
||||
)
|
||||
.context("failed to create VVU proxy device")?;
|
||||
|
||||
|
|
|
@ -108,6 +108,7 @@ fn create_virtio_devices(
|
|||
fs_device_tubes: &mut Vec<Tube>,
|
||||
#[cfg(feature = "gpu")] render_server_fd: Option<SafeDescriptor>,
|
||||
vvu_proxy_device_tubes: &mut Vec<Tube>,
|
||||
vvu_proxy_max_sibling_mem_size: u64,
|
||||
) -> DeviceResult<Vec<VirtioDeviceStub>> {
|
||||
let mut devs = Vec::new();
|
||||
|
||||
|
@ -128,6 +129,7 @@ fn create_virtio_devices(
|
|||
cfg,
|
||||
opt,
|
||||
vvu_proxy_device_tubes.remove(0),
|
||||
vvu_proxy_max_sibling_mem_size,
|
||||
)?);
|
||||
}
|
||||
|
||||
|
@ -476,6 +478,7 @@ fn create_devices(
|
|||
map_request: Arc<Mutex<Option<ExternalMapping>>>,
|
||||
#[cfg(feature = "gpu")] render_server_fd: Option<SafeDescriptor>,
|
||||
vvu_proxy_device_tubes: &mut Vec<Tube>,
|
||||
vvu_proxy_max_sibling_mem_size: u64,
|
||||
) -> DeviceResult<Vec<(Box<dyn BusDeviceObj>, Option<Minijail>)>> {
|
||||
let mut devices: Vec<(Box<dyn BusDeviceObj>, Option<Minijail>)> = Vec::new();
|
||||
let mut balloon_inflate_tube: Option<Tube> = None;
|
||||
|
@ -591,6 +594,7 @@ fn create_devices(
|
|||
#[cfg(feature = "gpu")]
|
||||
render_server_fd,
|
||||
vvu_proxy_device_tubes,
|
||||
vvu_proxy_max_sibling_mem_size,
|
||||
)?;
|
||||
|
||||
for stub in stubs {
|
||||
|
@ -1294,6 +1298,7 @@ where
|
|||
#[cfg(feature = "gpu")]
|
||||
render_server_fd,
|
||||
&mut vvu_proxy_device_tubes,
|
||||
components.memory_size,
|
||||
)?;
|
||||
|
||||
let mut hp_endpoints_ranges: Vec<RangeInclusive<u32>> = Vec::new();
|
||||
|
|
Loading…
Reference in a new issue