vm-allocator: Fix free range allocation
This patch fixes first_available_range(), the function responsible for finding the first range that can fit the requested size.

The previous algorithm appeared to allocate ranges from the end of the address space, but only because an empty region had been created right at the end. The problem is that the VMM might also request specific allocations at fixed addresses, for example to allocate the RAM. If the RAM range were 0-1GiB, the previous algorithm would have placed the next available range right after 1GiB. This is not the intended behavior, which is why the algorithm now walks the existing ranges starting from the end of the address space.

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Parent: 0a04a950a1
Commit: 709148803e
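Before looking at the diff, here is a minimal standalone sketch of the reverse-walk gap search described above. It is an illustration only: ranges are modeled as plain (start, size) pairs over u64 addresses, the function name mirrors first_available_range() for readability, and alignment handling is omitted, so this is not the actual vm-allocator code (which operates on GuestAddress/GuestUsize, as shown below).

// A minimal, standalone model of the reverse-walk gap search (not the actual
// vm-allocator code): ranges are plain (start, size) pairs sorted by start
// address, and alignment handling is omitted.
fn first_available_range(ranges: &[(u64, u64)], base: u64, req_size: u64) -> Option<u64> {
    // Walk the existing ranges from the highest one down to the lowest one.
    for (idx, &(start, _size)) in ranges.iter().enumerate().rev() {
        // End of the range immediately below the current one, or the base of
        // the address space if the current range is the lowest.
        let prev_end = if idx == 0 {
            base
        } else {
            let (prev_start, prev_size) = ranges[idx - 1];
            prev_start + prev_size
        };

        // If there is enough room between the previous range and this one,
        // place the new range right below this one, so that dynamic
        // allocations keep accumulating at the end of the address space.
        if start - prev_end >= req_size {
            return Some(start - req_size);
        }
    }
    None
}

fn main() {
    // Scenario from the commit message: RAM pinned at a fixed address,
    // covering 0..1 GiB, plus a small region sitting at the very end of the
    // address space (both values are made up for illustration).
    let ranges = [(0x0000_0000, 0x4000_0000), (0xFFFF_F000, 0x1000)];

    // The old algorithm would have returned the gap right after 1 GiB; the
    // reverse walk places the new range just below the topmost region.
    assert_eq!(first_available_range(&ranges, 0, 0x1000), Some(0xFFFF_E000));
}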
@@ -127,25 +127,32 @@ impl AddressAllocator {
         req_size: GuestUsize,
         alignment: GuestUsize,
     ) -> Option<GuestAddress> {
-        let mut prev_end_address = self.base;
+        let reversed_ranges: Vec<(&GuestAddress, &GuestUsize)> = self.ranges.iter().rev().collect();
 
-        for (address, size) in self.ranges.iter() {
+        for (idx, (address, _size)) in reversed_ranges.iter().enumerate() {
+            let next_range_idx = idx + 1;
+            let prev_end_address = if next_range_idx >= reversed_ranges.len() {
+                self.base
+            } else {
+                reversed_ranges[next_range_idx]
+                    .0
+                    .unchecked_add(*(reversed_ranges[next_range_idx].1))
+            };
+
             // If we have enough space between this range and the previous one,
             // we return the start of this range minus the requested size.
             // As each new range is allocated at the end of the available address space,
             // we will tend to always allocate new ranges there as well. In other words,
             // ranges accumulate at the end of the address space.
-            if address
-                .unchecked_sub(self.align_address(prev_end_address, alignment).raw_value())
-                .raw_value()
-                >= req_size
+            if let Some(size_delta) =
+                address.checked_sub(self.align_address(prev_end_address, alignment).raw_value())
             {
-                return Some(
-                    self.align_address(address.unchecked_sub(req_size + alignment), alignment),
-                );
+                if size_delta.raw_value() >= req_size {
+                    return Some(
+                        self.align_address(address.unchecked_sub(req_size + alignment), alignment),
+                    );
+                }
             }
-
-            prev_end_address = address.unchecked_add(*size);
         }
 
         None
@@ -257,7 +264,7 @@ mod tests {
         );
         assert_eq!(
             pool.allocate(None, 0x10, Some(0x100)),
-            Some(GuestAddress(0x10b00))
+            Some(GuestAddress(0x10d00))
         );
     }
 
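Besides walking the ranges from the end, the patch replaces the unchecked_sub comparison with checked_sub when measuring the gap. Once the previous range's end has been rounded up to the requested alignment, it can land above the candidate range's start; an unchecked subtraction there would underflow (either panicking or silently wrapping to a huge value that lets the size check pass), whereas checked_sub returns None and the candidate is simply skipped. The following standalone snippet illustrates that guard with plain u64 arithmetic and made-up addresses, not the vm-memory API:

fn main() {
    // Hypothetical numbers: the candidate range starts at 0x2080, the range
    // below it ends at 0x2010, and the requested alignment is 0x100.
    let address: u64 = 0x2080;
    let prev_end: u64 = 0x2010;
    let alignment: u64 = 0x100;

    // Rounding the previous end up to the alignment overshoots the candidate.
    let aligned_prev_end = (prev_end + alignment - 1) & !(alignment - 1); // 0x2100

    // A wrapping subtraction yields a huge bogus gap, so a `>= req_size`
    // check would wrongly succeed.
    let bogus_gap = address.wrapping_sub(aligned_prev_end);
    assert!(bogus_gap > u64::MAX / 2);

    // checked_sub detects the underflow, letting the caller skip this range.
    assert_eq!(address.checked_sub(aligned_prev_end), None);
}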