diff --git a/Kernel/Makefile b/Kernel/Makefile
index b61b230b76..d8f6f750a8 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -18,6 +18,7 @@ KERNEL_OBJS = \
     VM/VMObject.o \
     VM/PageDirectory.o \
     VM/PhysicalPage.o \
+    VM/RangeAllocator.o \
     Console.o \
     IRQHandler.o \
     kprintf.o \
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index d1db77623c..5c515056a9 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -70,14 +70,16 @@ bool Process::in_group(gid_t gid) const
 
 Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name, bool is_readable, bool is_writable, bool commit)
 {
+    laddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
-    // FIXME: This needs sanity checks. What if this overlaps existing regions?
-    if (laddr.is_null()) {
-        laddr = m_next_region;
-        m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
-    }
-    laddr.mask(0xfffff000);
-    m_regions.append(adopt(*new Region(laddr, size, move(name), is_readable, is_writable)));
+
+    Range range;
+    if (laddr.is_null())
+        range = m_range_allocator.allocate_anywhere(size);
+    else
+        range = m_range_allocator.allocate_specific(laddr, size);
+
+    m_regions.append(adopt(*new Region(range.base(), range.size(), move(name), is_readable, is_writable)));
     MM.map_region(*this, *m_regions.last());
     if (commit)
         m_regions.last()->commit();
@@ -86,30 +88,34 @@ Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name
 
 Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, RetainPtr<Inode>&& inode, String&& name, bool is_readable, bool is_writable)
 {
+    laddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
-    // FIXME: This needs sanity checks. What if this overlaps existing regions?
-    if (laddr.is_null()) {
-        laddr = m_next_region;
-        m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
-    }
-    laddr.mask(0xfffff000);
-    m_regions.append(adopt(*new Region(laddr, size, move(inode), move(name), is_readable, is_writable)));
+
+    Range range;
+    if (laddr.is_null())
+        range = m_range_allocator.allocate_anywhere(size);
+    else
+        range = m_range_allocator.allocate_specific(laddr, size);
+
+    m_regions.append(adopt(*new Region(range.base(), range.size(), move(inode), move(name), is_readable, is_writable)));
     MM.map_region(*this, *m_regions.last());
     return m_regions.last().ptr();
 }
 
 Region* Process::allocate_region_with_vmo(LinearAddress laddr, size_t size, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& name, bool is_readable, bool is_writable)
 {
+    laddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
-    // FIXME: This needs sanity checks. What if this overlaps existing regions?
-    if (laddr.is_null()) {
-        laddr = m_next_region;
-        m_next_region = m_next_region.offset(size).offset(PAGE_SIZE);
-    }
-    laddr.mask(0xfffff000);
+
+    Range range;
+    if (laddr.is_null())
+        range = m_range_allocator.allocate_anywhere(size);
+    else
+        range = m_range_allocator.allocate_specific(laddr, size);
+
     offset_in_vmo &= PAGE_MASK;
     size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
-    m_regions.append(adopt(*new Region(laddr, size, move(vmo), offset_in_vmo, move(name), is_readable, is_writable)));
+    m_regions.append(adopt(*new Region(range.base(), range.size(), move(vmo), offset_in_vmo, move(name), is_readable, is_writable)));
     MM.map_region(*this, *m_regions.last());
     return m_regions.last().ptr();
 }
@@ -119,6 +125,7 @@ bool Process::deallocate_region(Region& region)
     InterruptDisabler disabler;
     for (int i = 0; i < m_regions.size(); ++i) {
         if (m_regions[i] == &region) {
+            m_range_allocator.deallocate({ region.laddr(), region.size() });
             MM.unmap_region(region);
             m_regions.remove(i);
             return true;
@@ -539,6 +546,9 @@ Process* Process::create_kernel_process(String&& name, void (*e)())
     return process;
 }
 
+static const dword userspace_range_base = 0x01000000;
+static const dword kernelspace_range_base = 0xc0000000;
+
 Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring, RetainPtr<Inode>&& cwd, RetainPtr<Inode>&& executable, TTY* tty, Process* fork_parent)
     : m_name(move(name))
     , m_pid(next_pid++) // FIXME: RACE: This variable looks racy!
@@ -551,6 +561,7 @@ Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring
     , m_executable(move(executable))
     , m_tty(tty)
     , m_ppid(ppid)
+    , m_range_allocator(LinearAddress(userspace_range_base), kernelspace_range_base - userspace_range_base)
 {
     dbgprintf("Process: New process PID=%u with name=%s\n", m_pid, m_name.characters());
 
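All three allocate_* functions above now share the same shape: page-align the requested address, round the size up, then ask the per-process RangeAllocator either for any free range or for a specific one. A minimal sketch of that shared pattern, factored into a hypothetical helper (this diff keeps the sequence inlined in each function; allocate_range is an illustrative name, not part of the patch):

    // Hypothetical helper, not part of this diff: the pattern each
    // allocate_* function above inlines.
    Range Process::allocate_range(LinearAddress laddr, size_t size)
    {
        laddr.mask(PAGE_MASK);      // drop any offset within the page
        size = PAGE_ROUND_UP(size); // round the length up to a page multiple
        if (laddr.is_null())
            return m_range_allocator.allocate_anywhere(size);
        return m_range_allocator.allocate_specific(laddr, size);
    }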
diff --git a/Kernel/Process.h b/Kernel/Process.h
index a158ccd460..50be655577 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -7,10 +7,10 @@
 #include
 #include
 #include
+#include <Kernel/VM/RangeAllocator.h>
 #include
 #include
 #include
-
 #include
 #include
 
@@ -331,6 +331,7 @@ private:
     RetainPtr<ProcessTracer> m_tracer;
     OwnPtr<ELFLoader> m_elf_loader;
 
+    RangeAllocator m_range_allocator;
 
     Lock m_big_lock { "Process" };
 };
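Storing the allocator by value on Process gives every process its own free list, seeded in the constructor above with the window from userspace_range_base up to kernelspace_range_base. A quick sanity check of that window size, using the constants from Process.cpp:

    // The userspace window handed to each process's RangeAllocator.
    static_assert(0xc0000000u - 0x01000000u == 0xbf000000u,
                  "3 GiB minus 16 MiB, i.e. 3056 MiB of userspace addresses");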
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
new file mode 100644
index 0000000000..33be113c92
--- /dev/null
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -0,0 +1,137 @@
+#include <Kernel/VM/RangeAllocator.h>
+#include <Kernel/kstdio.h>
+#include <AK/QuickSort.h>
+
+RangeAllocator::RangeAllocator(LinearAddress base, size_t size)
+{
+    m_available_ranges.append({ base, size });
+}
+
+RangeAllocator::~RangeAllocator()
+{
+}
+
+void RangeAllocator::dump() const
+{
+    dbgprintf("RangeAllocator{%p}\n", this);
+    for (auto& range : m_available_ranges) {
+        dbgprintf("    %x -> %x\n", range.base().get(), range.end().get() - 1);
+    }
+}
+
+Vector<Range> Range::carve(const Range& taken)
+{
+    Vector<Range> parts;
+    if (taken == *this)
+        return { };
+    if (taken.base() > base())
+        parts.append({ base(), taken.base().get() - base().get() });
+    if (taken.end() < end())
+        parts.append({ taken.end(), end().get() - taken.end().get() });
+#ifdef VRA_DEBUG
+    dbgprintf("VRA: carve: remaining parts:\n");
+    for (int i = 0; i < parts.size(); ++i)
+        dbgprintf("    %x-%x\n", parts[i].base().get(), parts[i].end().get() - 1);
+#endif
+    return parts;
+}
+
+void RangeAllocator::carve_at_index(int index, const Range& range)
+{
+    auto remaining_parts = m_available_ranges[index].carve(range);
+    ASSERT(remaining_parts.size() >= 1);
+    m_available_ranges[index] = remaining_parts[0];
+    if (remaining_parts.size() == 2)
+        m_available_ranges.insert(index + 1, move(remaining_parts[1]));
+}
+
+Range RangeAllocator::allocate_anywhere(size_t size)
+{
+    for (int i = 0; i < m_available_ranges.size(); ++i) {
+        auto& available_range = m_available_ranges[i];
+        if (available_range.size() < size)
+            continue;
+        Range allocated_range(available_range.base(), size);
+        if (available_range.size() == size) {
+#ifdef VRA_DEBUG
+            dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
+#endif
+            m_available_ranges.remove(i);
+            return allocated_range;
+        }
+        carve_at_index(i, allocated_range);
+#ifdef VRA_DEBUG
+        dbgprintf("VRA: Allocated anywhere(%u): %x\n", size, allocated_range.base().get());
+        dump();
+#endif
+        return allocated_range;
+    }
+    kprintf("VRA: Failed to allocate anywhere: %u\n", size);
+    return { };
+}
+
+Range RangeAllocator::allocate_specific(LinearAddress base, size_t size)
+{
+    Range allocated_range(base, size);
+    for (int i = 0; i < m_available_ranges.size(); ++i) {
+        auto& available_range = m_available_ranges[i];
+        if (!available_range.contains(base, size))
+            continue;
+        if (available_range == allocated_range) {
+            m_available_ranges.remove(i);
+            return allocated_range;
+        }
+        carve_at_index(i, allocated_range);
+#ifdef VRA_DEBUG
+        dbgprintf("VRA: Allocated specific(%u): %x\n", size, available_range.base().get());
+        dump();
+#endif
+        return allocated_range;
+    }
+    kprintf("VRA: Failed to allocate specific range: %x(%u)\n", base.get(), size);
+    return { };
+}
+
+void RangeAllocator::deallocate(Range range)
+{
+#ifdef VRA_DEBUG
+    dbgprintf("VRA: Deallocate: %x(%u)\n", range.base().get(), range.size());
+    dump();
+#endif
+
+    for (auto& available_range : m_available_ranges) {
+        if (available_range.end() == range.base()) {
+            available_range.m_size += range.size();
+            goto sort_and_merge;
+        }
+    }
+    m_available_ranges.append(range);
+
+sort_and_merge:
+    // FIXME: We don't have to sort if we insert at the right position immediately.
+    quick_sort(m_available_ranges.begin(), m_available_ranges.end(), [] (auto& a, auto& b) {
+        return a.base() < b.base();
+    });
+
+    Vector<Range> merged_ranges;
+    merged_ranges.ensure_capacity(m_available_ranges.size());
+
+    for (auto& range : m_available_ranges) {
+        if (merged_ranges.is_empty()) {
+            merged_ranges.append(range);
+            continue;
+        }
+        if (range.base() == merged_ranges.last().end()) {
+            merged_ranges.last().m_size += range.size();
+            continue;
+        }
+        merged_ranges.append(range);
+    }
+
+    m_available_ranges = move(merged_ranges);
+
+#ifdef VRA_DEBUG
+    dbgprintf("VRA: After deallocate\n");
+    dump();
+#endif
+}
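The allocator is first-fit: allocate_anywhere() scans the free list (kept sorted by base address) and carves the allocation out of the lowest hole that fits, while deallocate() returns the range, re-sorts, and coalesces adjacent free ranges. A minimal usage sketch, with illustrative addresses and sizes:

    RangeAllocator allocator(LinearAddress(0x01000000), 0xbf000000);

    auto a = allocator.allocate_anywhere(0x2000);                            // lowest hole that fits
    auto b = allocator.allocate_specific(LinearAddress(0x02000000), 0x1000);

    allocator.deallocate(a); // merged back into the free list
    allocator.deallocate(b); // coalesces with its free neighbors
    allocator.dump();        // a single range covering the whole window again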
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
new file mode 100644
index 0000000000..0c2a7759cb
--- /dev/null
+++ b/Kernel/VM/RangeAllocator.h
@@ -0,0 +1,59 @@
+#pragma once
+
+#include <AK/Vector.h>
+#include <Kernel/LinearAddress.h>
+
+class Range {
+    friend class RangeAllocator;
+public:
+    Range() { }
+    Range(LinearAddress base, size_t size)
+        : m_base(base)
+        , m_size(size)
+    {
+    }
+
+    LinearAddress base() const { return m_base; }
+    size_t size() const { return m_size; }
+    bool is_valid() const { return !m_base.is_null(); }
+
+    LinearAddress end() const { return m_base.offset(m_size); }
+
+    bool operator==(const Range& other) const
+    {
+        return m_base == other.m_base && m_size == other.m_size;
+    }
+
+    bool contains(LinearAddress base, size_t size) const
+    {
+        return base >= m_base && base.offset(size) <= end();
+    }
+
+    bool contains(const Range& other) const
+    {
+        return contains(other.base(), other.size());
+    }
+
+    Vector<Range> carve(const Range&);
+
+private:
+    LinearAddress m_base;
+    size_t m_size { 0 };
+};
+
+class RangeAllocator {
+public:
+    RangeAllocator(LinearAddress, size_t);
+    ~RangeAllocator();
+
+    Range allocate_anywhere(size_t);
+    Range allocate_specific(LinearAddress, size_t);
+    void deallocate(Range);
+
+    void dump() const;
+
+private:
+    void carve_at_index(int, const Range&);
+
+    Vector<Range> m_available_ranges;
+};
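Range::carve() is the primitive underneath both allocation paths: removing a taken range from a free range leaves zero, one, or two remainders. Worked through with illustrative values:

    Range free_range(LinearAddress(0x10000), 0x8000); // covers [0x10000, 0x18000)
    Range taken(LinearAddress(0x12000), 0x2000);      // covers [0x12000, 0x14000)

    auto parts = free_range.carve(taken);
    // parts[0] covers [0x10000, 0x12000) -- the piece below the taken range
    // parts[1] covers [0x14000, 0x18000) -- the piece above it

One caveat worth noting: contains() computes base.offset(size), which can wrap around for ranges ending at the very top of the 32-bit address space.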