UserspaceEmulator: Implement a proper VM allocator
This patch brings Kernel::RangeAllocator to UserspaceEmulator in a slightly simplified form. It supports the three basic allocation types needed by virt$mmap(): allocate_anywhere, allocate_specific, and allocate_randomized. Porting virt$mmap() and virt$munmap() to use the allocator makes UE work correctly once again. :^)
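For orientation, here is a minimal sketch of how a virt$mmap()-style caller might choose between the three entry points named above. This is not the Emulator's actual code: the helper name pick_range() and the exact flag handling (MAP_FIXED, MAP_RANDOMIZED) are assumptions for illustration.

// Hypothetical sketch only; the real virt$mmap() plumbing is not part of this page.
Optional<Range> pick_range(RangeAllocator& allocator, FlatPtr requested_address, size_t size, size_t alignment, int flags)
{
    if (flags & MAP_FIXED) {
        // The caller demands this exact (page-aligned) base address.
        return allocator.allocate_specific(VirtualAddress(requested_address), size);
    }
    if (flags & MAP_RANDOMIZED) {
        // ASLR-style placement: random probes with a first-fit fallback.
        return allocator.allocate_randomized(size, alignment);
    }
    // No placement constraint: first-fit anywhere in the managed range.
    return allocator.allocate_anywhere(size, alignment);
}

On virt$munmap(), the Range would go back via allocator.deallocate(range), which re-inserts it into the free list and coalesces it with adjacent free ranges.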
parent 9dacd7c0ec
commit 89483a9408
10 changed files with 438 additions and 36 deletions
Userland/DevTools/UserspaceEmulator/RangeAllocator.cpp (new file, 207 lines)
@@ -0,0 +1,207 @@
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "RangeAllocator.h"
#include <AK/BinarySearch.h>
#include <AK/Checked.h>
#include <AK/QuickSort.h>
#include <AK/Random.h>

#define VM_GUARD_PAGES
#define PAGE_MASK ((FlatPtr)0xfffff000u)

namespace UserspaceEmulator {

RangeAllocator::RangeAllocator()
    : m_total_range({}, 0)
{
}

void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    m_available_ranges.append({ base, size });
}

RangeAllocator::~RangeAllocator()
{
}

void RangeAllocator::dump() const
{
    dbgln("RangeAllocator({})", this);
    for (auto& range : m_available_ranges) {
        dbgln("    {:x} -> {:x}", range.base().get(), range.end().get() - 1);
    }
}
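
// Splits the free range at `index` around the allocated sub-range: the lower
// leftover piece replaces the original entry, and the upper piece (if the
// allocation did not reach the end of the gap) is inserted right after it,
// keeping m_available_ranges sorted by base address.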
void RangeAllocator::carve_at_index(int index, const Range& range)
{
    auto remaining_parts = m_available_ranges[index].carve(range);
    ASSERT(remaining_parts.size() >= 1);
    ASSERT(m_total_range.contains(remaining_parts[0]));
    m_available_ranges[index] = remaining_parts[0];
    if (remaining_parts.size() == 2) {
        ASSERT(m_total_range.contains(remaining_parts[1]));
        m_available_ranges.insert(index + 1, move(remaining_parts[1]));
    }
}
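
// ASLR-style placement: probe up to 1000 random page-aligned bases with
// allocate_specific(), falling back to plain first-fit if none of them
// happens to land in a free gap.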
Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
{
    if (!size)
        return {};

    ASSERT((size % PAGE_SIZE) == 0);
    ASSERT((alignment % PAGE_SIZE) == 0);

    // FIXME: I'm sure there's a smarter way to do this.
    static constexpr size_t maximum_randomization_attempts = 1000;
    for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
        VirtualAddress random_address { AK::get_random<FlatPtr>() };
        random_address.mask(PAGE_MASK);

        if (!m_total_range.contains(random_address, size))
            continue;

        auto range = allocate_specific(random_address, size);
        if (range.has_value())
            return range;
    }

    return allocate_anywhere(size, alignment);
}
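
// First-fit search over the sorted free list. With VM_GUARD_PAGES defined,
// candidate gaps must have room for two extra pages and the chosen base is
// pushed one page past the start of the gap, leaving an unmapped guard page
// below the mapping (and room for one above).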
Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return {};

    ASSERT((size % PAGE_SIZE) == 0);
    ASSERT((alignment % PAGE_SIZE) == 0);

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
        return {};

    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
        return {};

    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
        FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

        Range allocated_range(VirtualAddress(aligned_base), size);
        ASSERT(m_total_range.contains(allocated_range));

        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    klog() << "RangeAllocator: Failed to allocate anywhere: " << size << ", " << alignment;
    return {};
}
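
// Honors an exact placement request (e.g. for MAP_FIXED-style mappings):
// succeeds only if [base, base + size) lies entirely within one free range.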
Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return {};

    ASSERT(base.is_page_aligned());
    ASSERT((size % PAGE_SIZE) == 0);

    Range allocated_range(base, size);
    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        ASSERT(m_total_range.contains(allocated_range));
        if (!available_range.contains(base, size))
            continue;
        if (available_range == allocated_range) {
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
        return allocated_range;
    }
    return {};
}
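
// Returns a range to the free list. The binary search looks for a free range
// whose end is exactly range.base() (the comparator yields 0 in that case) and
// grows it in place; otherwise the range is inserted at its sorted position.
// Either way, we then try to coalesce with the following free range.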
void RangeAllocator::deallocate(const Range& range)
{
    ASSERT(m_total_range.contains(range));
    ASSERT(range.size());
    ASSERT((range.size() % PAGE_SIZE) == 0);
    ASSERT(range.base() < range.end());
    ASSERT(!m_available_ranges.is_empty());

    size_t nearby_index = 0;
    auto* existing_range = binary_search(
        m_available_ranges.span(),
        range,
        &nearby_index,
        [](auto& a, auto& b) { return a.base().get() - b.end().get(); });

    size_t inserted_index = 0;
    if (existing_range) {
        existing_range->m_size += range.size();
        inserted_index = nearby_index;
    } else {
        m_available_ranges.insert_before_matching(
            Range(range), [&](auto& entry) {
                return entry.base() >= range.end();
            },
            nearby_index, &inserted_index);
    }

    if (inserted_index < (m_available_ranges.size() - 1)) {
        // We already merged with previous. Try to merge with next.
        auto& inserted_range = m_available_ranges[inserted_index];
        auto& next_range = m_available_ranges[inserted_index + 1];
        if (inserted_range.end() == next_range.base()) {
            inserted_range.m_size += next_range.size();
            m_available_ranges.remove(inserted_index + 1);
            return;
        }
    }
}

}