
Kernel: Optimize single physical page allocation and randomize returns

Rather than trying to find a contiguous set of bits of size 1, just
find one single available bit using a hint.
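The body of find_one_free_page() lives in PhysicalRegion.cpp and is not part of the hunk shown below, but a minimal sketch of such a hint-based lookup, assuming the AK::Bitmap get()/set() accessors and a simple wrapping linear scan (illustrative only, not the actual implementation), might look like this:

Optional<unsigned> PhysicalRegion::find_one_free_page()
{
    if (m_used == m_pages)
        return {}; // Every page in this region is already allocated.

    // Start scanning at the hint; it points just past the last page that
    // was handed out, so a free bit is usually found within a few probes.
    for (unsigned i = 0; i < m_pages; ++i) {
        unsigned page_index = (m_free_hint + i) % m_pages;
        if (!m_bitmap.get(page_index)) {
            m_bitmap.set(page_index, true);
            ++m_used;
            m_free_hint = page_index + 1; // Resume the next search after this page.
            return page_index;
        }
    }
    return {};
}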

Also, try to randomize returned physical pages a bit by placing them
into a 256-entry queue rather than making them available immediately.
Then, once the queue is filled, pick a random entry, make that page
available again, and reuse its slot for the latest returned page.
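A rough sketch of how return_page() and that queue could interact, assuming the kernel's get_fast_random<u8>() helper as the source of the random index (a u8 spans exactly the 256 inline slots) and a free_page_at() helper that actually clears the bitmap bit; the real body is in PhysicalRegion.cpp, which this commit view does not include:

void PhysicalRegion::return_page(const PhysicalPage& page)
{
    if (m_recently_returned.size() >= m_recently_returned.capacity()) {
        // The queue is full: evict a random entry, genuinely free that page,
        // and park the page being returned now in the evicted slot.
        auto& entry = m_recently_returned[get_fast_random<u8>()];
        free_page_at(entry);
        entry = page.paddr();
    } else {
        // Still filling the queue: just park the returned page for later.
        m_recently_returned.append(page.paddr());
    }
}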
Authored by Tom on 2020-09-07 23:18:38 -06:00, committed by Andreas Kling
parent 92e400c7f9
commit efe2b75017
2 changed files with 55 additions and 10 deletions


@@ -47,18 +47,19 @@ public:
PhysicalAddress lower() const { return m_lower; }
PhysicalAddress upper() const { return m_upper; }
unsigned size() const { return m_pages; }
-unsigned used() const { return m_used; }
-unsigned free() const { return m_pages - m_used; }
+unsigned used() const { return m_used - m_recently_returned.size(); }
+unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
RefPtr<PhysicalPage> take_free_page(bool supervisor);
NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
-void return_page_at(PhysicalAddress addr);
-void return_page(const PhysicalPage& page) { return_page_at(page.paddr()); }
+void return_page(const PhysicalPage& page);
private:
unsigned find_contiguous_free_pages(size_t count);
Optional<unsigned> find_and_allocate_contiguous_range(size_t count);
+Optional<unsigned> find_one_free_page();
+void free_page_at(PhysicalAddress addr);
PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
@@ -67,6 +68,8 @@ private:
unsigned m_pages { 0 };
unsigned m_used { 0 };
Bitmap m_bitmap;
+size_t m_free_hint { 0 };
+Vector<PhysicalAddress, 256> m_recently_returned;
};
}
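Pages parked in m_recently_returned keep their bitmap bits set, which is why the used() and free() accessors above correct for the queue size. A hypothetical free_page_at() that completes the cycle, again only a sketch since the .cpp side is not shown here, could clear the bit, refresh the hint, and drop the usage count:

void PhysicalRegion::free_page_at(PhysicalAddress addr)
{
    ASSERT(m_used != 0);
    auto page_index = (addr.get() - m_lower.get()) / PAGE_SIZE;
    m_bitmap.set(page_index, false); // The page is genuinely allocatable again.
    m_free_hint = page_index;        // A likely spot for the next single-page search.
    --m_used;
}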