resources: allocate_at support for address_allocator

Support for address range allocation at a specific
location and size. Allocation succeeds only if the
requested range is available and free.
Change the address_allocator allocation algorithm
from contiguous memory allocation to a first-fit-like
implementation to allow memory release/reuse in the future.

BUG=None
TEST=cargo test -p resources && tast test vm.*

Change-Id: I58218f5a2c6a215152904cc1cf0748e842fa7374
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2203297
Tested-by: Tomasz Jeznach <tjeznach@chromium.org>
Commit-Queue: Tomasz Jeznach <tjeznach@chromium.org>
Reviewed-by: Zach Reizner <zachr@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
This commit is contained in:
Tomasz Jeznach 2020-05-15 14:26:27 -07:00 committed by Commit Bot
parent e7d1221c9d
commit aa6bdd92da

View file

@ -3,7 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
use std::cmp; use std::cmp;
use std::collections::HashMap; use std::collections::{BTreeSet, HashMap};
use crate::{Alloc, Error, Result}; use crate::{Alloc, Error, Result};
@ -26,11 +26,9 @@ use crate::{Alloc, Error, Result};
/// ``` /// ```
#[derive(Debug, Eq, PartialEq)] #[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator { pub struct AddressAllocator {
pool_base: u64,
pool_end: u64,
alignment: u64, alignment: u64,
next_addr: u64,
allocs: HashMap<Alloc, (u64, u64, String)>, allocs: HashMap<Alloc, (u64, u64, String)>,
regions: BTreeSet<(u64, u64)>,
} }
impl AddressAllocator { impl AddressAllocator {
@ -55,12 +53,12 @@ impl AddressAllocator {
if !alignment.is_power_of_two() || alignment == 0 { if !alignment.is_power_of_two() || alignment == 0 {
return Err(Error::BadAlignment); return Err(Error::BadAlignment);
} }
let mut regions = BTreeSet::new();
regions.insert((pool_base, pool_end));
Ok(AddressAllocator { Ok(AddressAllocator {
pool_base,
pool_end,
alignment, alignment,
next_addr: pool_base,
allocs: HashMap::new(), allocs: HashMap::new(),
regions,
}) })
} }
@ -85,32 +83,80 @@ impl AddressAllocator {
if !alignment.is_power_of_two() { if !alignment.is_power_of_two() {
return Err(Error::BadAlignment); return Err(Error::BadAlignment);
} }
let align_adjust = if self.next_addr % alignment != 0 {
alignment - (self.next_addr % alignment) // finds first region matching alignment and size.
} else { match self
0 .regions
}; .iter()
let addr = self .find(|range| {
.next_addr match range.0 % alignment {
.checked_add(align_adjust) 0 => range.0.checked_add(size - 1),
.ok_or(Error::OutOfSpace)?; r => range.0.checked_add(size - 1 + alignment - r),
let end_addr = addr.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
if end_addr > self.pool_end {
return Err(Error::OutOfSpace);
} }
.map_or(false, |end| end <= range.1)
})
.cloned()
{
Some(slot) => {
self.regions.remove(&slot);
let start = match slot.0 % alignment {
0 => slot.0,
r => slot.0 + alignment - r,
};
let end = start + size - 1;
if slot.0 < start {
self.regions.insert((slot.0, start - 1));
}
if slot.1 > end {
self.regions.insert((end + 1, slot.1));
}
self.allocs.insert(alloc, (start, size, tag));
// TODO(dgreid): Use a smarter allocation strategy. The current strategy is just Ok(start)
// bumping this pointer, meaning it will eventually exhaust available addresses. }
self.next_addr = end_addr.saturating_add(1); None => Err(Error::OutOfSpace),
}
self.allocs.insert(alloc, (addr, size, tag));
Ok(addr)
} }
pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> { pub fn allocate(&mut self, size: u64, alloc: Alloc, tag: String) -> Result<u64> {
self.allocate_with_align(size, alloc, tag, self.alignment) self.allocate_with_align(size, alloc, tag, self.alignment)
} }
/// Allocates a range of addresses from the managed region with an optional tag
/// and required location. Allocation alignment is not enforced.
/// Returns OutOfSpace if requested range is not available (e.g. already allocated
/// with a different alloc tag).
pub fn allocate_at(&mut self, start: u64, size: u64, alloc: Alloc, tag: String) -> Result<()> {
if self.allocs.contains_key(&alloc) {
return Err(Error::ExistingAlloc(alloc));
}
if size == 0 {
return Err(Error::AllocSizeZero);
}
let end = start.checked_add(size - 1).ok_or(Error::OutOfSpace)?;
match self
.regions
.iter()
.find(|range| range.0 <= start && range.1 >= end)
.cloned()
{
Some(slot) => {
self.regions.remove(&slot);
if slot.0 < start {
self.regions.insert((slot.0, start - 1));
}
if slot.1 > end {
self.regions.insert((end + 1, slot.1));
}
self.allocs.insert(alloc, (start, size, tag));
Ok(())
}
None => Err(Error::OutOfSpace),
}
}
/// Returns allocation associated with `alloc`, or None if no such allocation exists. /// Returns allocation associated with `alloc`, or None if no such allocation exists.
pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> { pub fn get(&self, alloc: &Alloc) -> Option<&(u64, u64, String)> {
self.allocs.get(alloc) self.allocs.get(alloc)
@ -171,6 +217,44 @@ mod tests {
); );
} }
#[test]
fn allocate_with_special_alignment() {
    // Pool covers [0x1000, 0x1fff] with a default alignment of 0x100.
    let mut allocator = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
    // A default-aligned allocation lands at the pool base.
    let first = allocator.allocate(0x10, Alloc::Anon(0), String::from("bar0"));
    assert_eq!(first, Ok(0x1000));
    // Explicit placement inside the remaining free space succeeds.
    let placed = allocator.allocate_at(0x1200, 0x100, Alloc::Anon(1), String::from("bar1"));
    assert_eq!(placed, Ok(()));
    // A stricter 0x800 alignment skips past the fixed allocation to 0x1800.
    let aligned = allocator.allocate_with_align(0x800, Alloc::Anon(2), String::from("bar2"), 0x800);
    assert_eq!(aligned, Ok(0x1800));
}
#[test]
fn allocate_and_split_allocate_at() {
    // Pool covers [0x1000, 0x1fff] with a default alignment of 0x100.
    let mut allocator = AddressAllocator::new(0x1000, 0x1000, Some(0x100)).unwrap();
    // Claim the middle of the pool, splitting the free space into
    // [0x1000, 0x11ff] and [0x1a00, 0x1fff].
    let placed = allocator.allocate_at(0x1200, 0x800, Alloc::Anon(0), String::from("bar0"));
    assert_eq!(placed, Ok(()));
    // Neither leftover fragment can hold 0x800 bytes.
    let too_big = allocator.allocate(0x800, Alloc::Anon(1), String::from("bar1"));
    assert_eq!(too_big, Err(Error::OutOfSpace));
    // 0x600 bytes only fit in the upper fragment; first-fit picks 0x1a00.
    let upper = allocator.allocate(0x600, Alloc::Anon(2), String::from("bar2"));
    assert_eq!(upper, Ok(0x1a00));
    // 0x200 bytes fit in the lower fragment at the pool base.
    let lower = allocator.allocate(0x200, Alloc::Anon(3), String::from("bar3"));
    assert_eq!(lower, Ok(0x1000));
}
#[test] #[test]
fn allocate_alignment() { fn allocate_alignment() {
let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100)).unwrap(); let mut pool = AddressAllocator::new(0x1000, 0x10000, Some(0x100)).unwrap();