mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 19:27:35 +00:00
Kernel: Return an already destructed PhysicalPage to the allocators
By making sure the PhysicalPage instance is fully destructed, the allocators will have a chance to reclaim the PhysicalPageEntry for free-list purposes. Just pass them the physical address of the page that was freed, which is enough to look up the PhysicalPageEntry later.
This commit is contained in:
parent
87dc4c3d2c
commit
c1006a3689
6 changed files with 32 additions and 28 deletions
|
@ -29,13 +29,13 @@ public:
|
|||
unsigned size() const { return m_pages; }
|
||||
unsigned used() const { return m_used - m_recently_returned.size(); }
|
||||
unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
|
||||
bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
|
||||
bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr <= m_upper; }
|
||||
|
||||
NonnullRefPtr<PhysicalRegion> take_pages_from_beginning(unsigned);
|
||||
|
||||
RefPtr<PhysicalPage> take_free_page(bool supervisor);
|
||||
NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
|
||||
void return_page(const PhysicalPage& page);
|
||||
void return_page(PhysicalAddress);
|
||||
|
||||
private:
|
||||
unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue