vm-allocator: Align address at allocation time

There is alignment support in AddressAllocator, but there are occasions
where the alignment is known only when allocate() is called. One example
is a PCI BAR, which is naturally aligned, meaning its base address must
be aligned to its size.

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
Chao Peng 2019-07-03 19:40:04 +00:00 committed by Sebastien Boeuf
parent af7cd74e04
commit 96fb38a5aa
4 changed files with 111 additions and 75 deletions

vm-allocator/src/address.rs Normal file → Executable file

@@ -28,46 +28,34 @@ pub type Result<T> = result::Result<T, Error>;
/// ```
/// # use vm_allocator::AddressAllocator;
/// # use vm_memory::{Address, GuestAddress, GuestUsize};
/// AddressAllocator::new(GuestAddress(0x1000), 0x10000, Some(0x100)).map(|mut pool| {
/// assert_eq!(pool.allocate(None, 0x110), Some(GuestAddress(0x10e00)));
/// assert_eq!(pool.allocate(None, 0x100), Some(GuestAddress(0x10c00)));
/// AddressAllocator::new(GuestAddress(0x1000), 0x10000).map(|mut pool| {
/// assert_eq!(pool.allocate(None, 0x110, Some(0x100)), Some(GuestAddress(0x10e00)));
/// assert_eq!(pool.allocate(None, 0x100, Some(0x100)), Some(GuestAddress(0x10c00)));
/// });
/// ```
#[derive(Debug, Eq, PartialEq)]
pub struct AddressAllocator {
base: GuestAddress,
end: GuestAddress,
alignment: GuestUsize,
ranges: BTreeMap<GuestAddress, GuestUsize>,
}
impl AddressAllocator {
/// Creates a new `AddressAllocator` for managing a range of addresses.
/// Can return `None` if `pool_base` + `pool_size` overflows a u64 or if alignment isn't a power
/// of two.
/// Can return `None` if `base` + `size` overflows a u64.
///
/// * `pool_base` - The starting address of the range to manage.
/// * `pool_size` - The size of the address range in bytes.
/// * `align_size` - The minimum size of an address region to align to, defaults to four.
pub fn new(
base: GuestAddress,
size: GuestUsize,
align_size: Option<GuestUsize>,
) -> Option<Self> {
/// * `base` - The starting address of the range to manage.
/// * `size` - The size of the address range in bytes.
pub fn new(base: GuestAddress, size: GuestUsize) -> Option<Self> {
if size == 0 {
return None;
}
let end = base.checked_add(size - 1)?;
let alignment = align_size.unwrap_or(4);
if !alignment.is_power_of_two() || alignment == 0 {
return None;
}
let mut allocator = AddressAllocator {
base,
end,
alignment,
ranges: BTreeMap::new(),
};
@@ -78,9 +66,9 @@ impl AddressAllocator {
Some(allocator)
}
fn align_address(&self, address: GuestAddress) -> GuestAddress {
let align_adjust = if address.raw_value() % self.alignment != 0 {
self.alignment - (address.raw_value() % self.alignment)
fn align_address(&self, address: GuestAddress, alignment: GuestUsize) -> GuestAddress {
let align_adjust = if address.raw_value() % alignment != 0 {
alignment - (address.raw_value() % alignment)
} else {
0
};
@@ -92,8 +80,9 @@ impl AddressAllocator {
&self,
req_address: GuestAddress,
req_size: GuestUsize,
alignment: GuestUsize,
) -> Result<GuestAddress> {
let aligned_address = self.align_address(req_address);
let aligned_address = self.align_address(req_address, alignment);
// The requested address should be aligned.
if aligned_address != req_address {
@@ -133,7 +122,11 @@ impl AddressAllocator {
Err(Error::Overflow)
}
fn first_available_range(&self, req_size: GuestUsize) -> Option<GuestAddress> {
fn first_available_range(
&self,
req_size: GuestUsize,
alignment: GuestUsize,
) -> Option<GuestAddress> {
let mut prev_end_address = self.base;
for (address, size) in self.ranges.iter() {
@@ -143,11 +136,13 @@ impl AddressAllocator {
// we will tend to always allocate new ranges there as well. In other words,
// ranges accumulate at the end of the address space.
if address
.unchecked_sub(self.align_address(prev_end_address).raw_value())
.unchecked_sub(self.align_address(prev_end_address, alignment).raw_value())
.raw_value()
>= req_size
{
return Some(self.align_address(address.unchecked_sub(req_size + self.alignment)));
return Some(
self.align_address(address.unchecked_sub(req_size + alignment), alignment),
);
}
prev_end_address = address.unchecked_add(*size);
@@ -157,24 +152,31 @@ impl AddressAllocator {
}
/// Allocates a range of addresses from the managed region. Returns `Some(allocated_address)`
/// when successful, or `None` if an area of `size` can't be allocated.
/// when successful, or `None` if an area of `size` can't be allocated or if alignment isn't
/// a power of two.
pub fn allocate(
&mut self,
address: Option<GuestAddress>,
size: GuestUsize,
align_size: Option<GuestUsize>,
) -> Option<GuestAddress> {
if size == 0 {
return None;
}
let alignment = align_size.unwrap_or(4);
if !alignment.is_power_of_two() || alignment == 0 {
return None;
}
let new_addr = match address {
Some(req_address) => match self.available_range(req_address, size) {
Some(req_address) => match self.available_range(req_address, size, alignment) {
Ok(addr) => addr,
Err(_) => {
return None;
}
},
None => self.first_available_range(size)?,
None => self.first_available_range(size, alignment)?,
};
self.ranges.insert(new_addr, size);
@@ -200,154 +202,183 @@ mod tests {
#[test]
fn new_fails_overflow() {
assert_eq!(
AddressAllocator::new(GuestAddress(u64::max_value()), 0x100, None),
AddressAllocator::new(GuestAddress(u64::max_value()), 0x100),
None
);
}
#[test]
fn new_fails_size_zero() {
assert_eq!(AddressAllocator::new(GuestAddress(0x1000), 0, None), None);
assert_eq!(AddressAllocator::new(GuestAddress(0x1000), 0), None);
}
#[test]
fn new_fails_alignment_zero() {
fn allocate_fails_alignment_zero() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
assert_eq!(
AddressAllocator::new(GuestAddress(0x1000), 0x10000, Some(0)),
pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(0)),
None
);
}
#[test]
fn new_fails_alignment_non_power_of_two() {
fn allocate_fails_alignment_non_power_of_two() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
assert_eq!(
AddressAllocator::new(GuestAddress(0x1000), 0x10000, Some(200)),
pool.allocate(Some(GuestAddress(0x1000)), 0x100, Some(200)),
None
);
}
#[test]
fn allocate_fails_not_enough_space() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
assert_eq!(pool.allocate(None, 0x800), Some(GuestAddress(0x1700)));
assert_eq!(pool.allocate(None, 0x900), None);
assert_eq!(pool.allocate(None, 0x400), Some(GuestAddress(0x1200)));
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
assert_eq!(
pool.allocate(None, 0x800, Some(0x100)),
Some(GuestAddress(0x1700))
);
assert_eq!(pool.allocate(None, 0x900, Some(0x100)), None);
assert_eq!(
pool.allocate(None, 0x400, Some(0x100)),
Some(GuestAddress(0x1200))
);
}
#[test]
fn allocate_alignment() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000, Some(0x100)).unwrap();
assert_eq!(pool.allocate(None, 0x110), Some(GuestAddress(0x10e00)));
assert_eq!(pool.allocate(None, 0x100), Some(GuestAddress(0x10c00)));
assert_eq!(pool.allocate(None, 0x10), Some(GuestAddress(0x10b00)));
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x10000).unwrap();
assert_eq!(
pool.allocate(None, 0x110, Some(0x100)),
Some(GuestAddress(0x10e00))
);
assert_eq!(
pool.allocate(None, 0x100, Some(0x100)),
Some(GuestAddress(0x10c00))
);
assert_eq!(
pool.allocate(None, 0x10, Some(0x100)),
Some(GuestAddress(0x10b00))
);
}
#[test]
fn allocate_address() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, None).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, None),
Some(GuestAddress(0x1200))
);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1a00)), 0x100),
pool.allocate(Some(GuestAddress(0x1a00)), 0x100, None),
Some(GuestAddress(0x1a00))
);
}
#[test]
fn allocate_address_alignment() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
// Unaligned request
assert_eq!(pool.allocate(Some(GuestAddress(0x1210)), 0x800), None);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1210)), 0x800, Some(0x100)),
None
);
// Aligned request
assert_eq!(
pool.allocate(Some(GuestAddress(0x1b00)), 0x100),
pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
Some(GuestAddress(0x1b00))
);
}
#[test]
fn allocate_address_not_enough_space() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
// First range is [0x1200:0x1a00]
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
// Second range is [0x1c00:0x1e00]
assert_eq!(
pool.allocate(Some(GuestAddress(0x1c00)), 0x200),
pool.allocate(Some(GuestAddress(0x1c00)), 0x200, Some(0x100)),
Some(GuestAddress(0x1c00))
);
// There is 0x200 between the first 2 ranges.
// We ask for an available address but the range is too big
assert_eq!(pool.allocate(Some(GuestAddress(0x1b00)), 0x800), None);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1b00)), 0x800, Some(0x100)),
None
);
// We ask for an available address, with a small enough range
assert_eq!(
pool.allocate(Some(GuestAddress(0x1b00)), 0x100),
pool.allocate(Some(GuestAddress(0x1b00)), 0x100, Some(0x100)),
Some(GuestAddress(0x1b00))
);
}
#[test]
fn allocate_address_free_and_realloc() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
// First range is [0x1200:0x1a00]
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
pool.free(GuestAddress(0x1200), 0x800);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
}
#[test]
fn allocate_address_free_fail_and_realloc() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
// First range is [0x1200:0x1a00]
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
// We try to free a range smaller than the allocated one.
pool.free(GuestAddress(0x1200), 0x100);
assert_eq!(pool.allocate(Some(GuestAddress(0x1200)), 0x800), None);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
None
);
}
#[test]
fn allocate_address_fail_free_and_realloc() {
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000, Some(0x100)).unwrap();
let mut pool = AddressAllocator::new(GuestAddress(0x1000), 0x1000).unwrap();
// First allocation fails
assert_eq!(pool.allocate(Some(GuestAddress(0x1200)), 0x2000), None);
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x2000, Some(0x100)),
None
);
// We try to free a range that was not allocated.
pool.free(GuestAddress(0x1200), 0x2000);
// Now we try an allocation that should succeed.
assert_eq!(
pool.allocate(Some(GuestAddress(0x1200)), 0x800),
pool.allocate(Some(GuestAddress(0x1200)), 0x800, Some(0x100)),
Some(GuestAddress(0x1200))
);
}
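
As a sanity check on the doc-comment example above: assuming new() seeds
the free map with an end-of-pool marker (that line of new() falls outside
these hunks), the first expected value works out as

    // pool: base = 0x1000, size = 0x10000 => free space ends at 0x11000
    // allocate(None, 0x110, Some(0x100)), per first_available_range():
    //   0x11000 - (0x110 + 0x100) = 0x10df0
    //   rounded up to a 0x100 boundary => 0x10e00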

vm-allocator/src/system.rs

@@ -33,7 +33,7 @@ fn pagesize() -> usize {
/// 5).unwrap();
/// assert_eq!(allocator.allocate_irq(), Some(5));
/// assert_eq!(allocator.allocate_irq(), Some(6));
/// assert_eq!(allocator.allocate_mmio_addresses(None, 0x1000), Some(GuestAddress(0x1fffe000)));
/// assert_eq!(allocator.allocate_mmio_addresses(None, 0x1000, Some(0x1000)), Some(GuestAddress(0x1fffe000)));
///
/// ```
pub struct SystemAllocator {
@@ -44,8 +44,7 @@ pub struct SystemAllocator {
impl SystemAllocator {
/// Creates a new `SystemAllocator` for managing addresses and irq numbers.
/// Can return `None` if `base` + `size` overflows a u64 or if alignment isn't a power
/// of two.
/// Can return `None` if `base` + `size` overflows a u64.
///
/// * `io_base` - The starting address of IO memory.
/// * `io_size` - The size of IO memory.
@@ -59,10 +58,9 @@ impl SystemAllocator {
mmio_size: GuestUsize,
first_irq: u32,
) -> Option<Self> {
let page_size = pagesize() as u64;
Some(SystemAllocator {
io_address_space: AddressAllocator::new(io_base, io_size, Some(0x1))?,
mmio_address_space: AddressAllocator::new(mmio_base, mmio_size, Some(page_size))?,
io_address_space: AddressAllocator::new(io_base, io_size)?,
mmio_address_space: AddressAllocator::new(mmio_base, mmio_size)?,
next_irq: first_irq,
})
}
@@ -82,8 +80,10 @@ impl SystemAllocator {
&mut self,
address: Option<GuestAddress>,
size: GuestUsize,
align_size: Option<GuestUsize>,
) -> Option<GuestAddress> {
self.io_address_space.allocate(address, size)
self.io_address_space
.allocate(address, size, Some(align_size.unwrap_or(0x1)))
}
/// Reserves a section of `size` bytes of MMIO address space.
@@ -91,8 +91,13 @@ impl SystemAllocator {
&mut self,
address: Option<GuestAddress>,
size: GuestUsize,
align_size: Option<GuestUsize>,
) -> Option<GuestAddress> {
self.mmio_address_space.allocate(address, size)
self.mmio_address_space.allocate(
address,
size,
Some(align_size.unwrap_or(pagesize() as u64)),
)
}
/// Free an IO address range.
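
Callers that do not care about a specific alignment can pass None and
get the old allocator-wide defaults back: byte alignment for IO and
page-size alignment for MMIO. A minimal sketch (the IO/MMIO ranges and
the IRQ base are made-up example values):

    use vm_allocator::SystemAllocator;
    use vm_memory::{Address, GuestAddress};

    fn main() {
        let mut allocator = SystemAllocator::new(
            GuestAddress(0x1000),      0x1_0000,    // IO space
            GuestAddress(0x1000_0000), 0x1000_0000, // MMIO space
            5,                                      // first IRQ
        ).unwrap();
        // None here means "use the page-size default" after this change.
        let addr = allocator
            .allocate_mmio_addresses(None, 0x1234, None)
            .unwrap();
        // Holds on a 4KiB-page host.
        assert_eq!(addr.raw_value() % 4096, 0);
    }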

vm-virtio/src/transport/pci_device.rs

@@ -451,7 +451,7 @@ impl PciDevice for VirtioPciDevice {
// Allocate the virtio-pci capability BAR.
// See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
let virtio_pci_bar_addr = allocator
.allocate_mmio_addresses(None, CAPABILITY_BAR_SIZE)
.allocate_mmio_addresses(None, CAPABILITY_BAR_SIZE, None)
.ok_or(PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE))?;
let config = PciBarConfiguration::default()
.set_register_index(0)
@@ -474,7 +474,7 @@ impl PciDevice for VirtioPciDevice {
// Allocate the device specific BARs.
for config in self.device.get_device_bars() {
let device_bar_addr = allocator
.allocate_mmio_addresses(None, config.get_size())
.allocate_mmio_addresses(None, config.get_size(), None)
.ok_or_else(|| PciDeviceError::IoAllocationFailed(config.get_size()))?;
config.set_address(device_bar_addr.raw_value());
let _device_bar = self.configuration.add_pci_bar(&config).map_err(|e| {

vmm/src/vm.rs

@@ -668,7 +668,7 @@ impl DeviceManager {
let size = pmem_cfg.size;
let pmem_guest_addr = allocator
.allocate_mmio_addresses(None, size as GuestUsize)
.allocate_mmio_addresses(None, size as GuestUsize, None)
.ok_or(DeviceManagerError::PmemRangeAllocation)?;
let (custom_flags, set_len) = if pmem_cfg.file.is_dir() {