
Kernel: Make MemoryManager API type-safe for Region::Access enum

Increase type-safety by moving the MemoryManager APIs which take a
Region::Access to actually use that type instead of a `u8`.

Eventually the actual m_access can be moved over to that type as well,
but I hit some weird bug where it wasn't using the correct operators
in `set_access_bit(..)` even though they are declared (and tested).
Something to fix up later.
Brian Gianforcaro 2021-03-06 19:33:25 -08:00 committed by Andreas Kling
parent c825159f01
commit eaef57443c
6 changed files with 39 additions and 35 deletions
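A minimal standalone sketch of the pattern this commit applies (hypothetical names, not SerenityOS code): an access parameter typed as an enum rejects stray integers that the old `u8` parameter accepted silently.

// Standalone illustration; Access mirrors the shape of Region::Access, and the
// two allocate_* functions are hypothetical stand-ins for the old and new
// MemoryManager signatures.
#include <cstdint>

using u8 = uint8_t;

enum Access : u8 {
    None = 0,
    Read = 1,
    Write = 2,
    Execute = 4,
};

void allocate_with_u8(u8) { }          // old shape: any byte converts silently
void allocate_with_access(Access) { }  // new shape: requires an Access value

int main()
{
    allocate_with_access(Read); // OK
    allocate_with_u8(0x13);     // accepted by the old u8 signature, even though
                                // 0x13 is not a valid Read/Write/Execute mask
    // allocate_with_access(0x13); // does not compile: no int -> Access conversion
    return 0;
}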

View file

@@ -48,7 +48,7 @@ namespace Kernel {
 class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
-static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
+static RefPtr<KBufferImpl> try_create_with_size(size_t size, Region::Access access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 auto region = MM.allocate_kernel_region(page_round_up(size), name, access, strategy);
 if (!region)
@@ -56,7 +56,7 @@ public:
 return adopt(*new KBufferImpl(region.release_nonnull(), size, strategy));
 }

-static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
+static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, Region::Access access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 auto region = MM.allocate_kernel_region(page_round_up(bytes.size()), name, access, strategy);
 if (!region)
@@ -65,12 +65,12 @@ public:
 return adopt(*new KBufferImpl(region.release_nonnull(), bytes.size(), strategy));
 }

-static RefPtr<KBufferImpl> create_with_size(size_t size, u8 access, const char* name, AllocationStrategy strategy = AllocationStrategy::Reserve)
+static RefPtr<KBufferImpl> create_with_size(size_t size, Region::Access access, const char* name, AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 return try_create_with_size(size, access, name, strategy);
 }

-static RefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access, const char* name)
+static RefPtr<KBufferImpl> copy(const void* data, size_t size, Region::Access access, const char* name)
 {
 auto buffer = create_with_size(size, access, name, AllocationStrategy::AllocateNow);
 if (!buffer)
@@ -124,7 +124,7 @@ public:
 {
 }

-[[nodiscard]] static OwnPtr<KBuffer> try_create_with_size(size_t size, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
+[[nodiscard]] static OwnPtr<KBuffer> try_create_with_size(size_t size, Region::Access access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 auto impl = KBufferImpl::try_create_with_size(size, access, name, strategy);
 if (!impl)
@@ -132,7 +132,7 @@ public:
 return adopt_own(*new KBuffer(impl.release_nonnull()));
 }

-[[nodiscard]] static OwnPtr<KBuffer> try_create_with_bytes(ReadonlyBytes bytes, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
+[[nodiscard]] static OwnPtr<KBuffer> try_create_with_bytes(ReadonlyBytes bytes, Region::Access access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 auto impl = KBufferImpl::try_create_with_bytes(bytes, access, name, strategy);
 if (!impl)
@@ -140,12 +140,12 @@ public:
 return adopt_own(*new KBuffer(impl.release_nonnull()));
 }

-[[nodiscard]] static KBuffer create_with_size(size_t size, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
+[[nodiscard]] static KBuffer create_with_size(size_t size, Region::Access access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
 {
 return KBuffer(KBufferImpl::create_with_size(size, access, name, strategy));
 }

-[[nodiscard]] static KBuffer copy(const void* data, size_t size, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer")
+[[nodiscard]] static KBuffer copy(const void* data, size_t size, Region::Access access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer")
 {
 return KBuffer(KBufferImpl::copy(data, size, access, name));
 }
@@ -165,7 +165,7 @@ public:
 [[nodiscard]] const KBufferImpl& impl() const { return *m_impl; }
 [[nodiscard]] RefPtr<KBufferImpl> take_impl() { return move(m_impl); }

-KBuffer(const ByteBuffer& buffer, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer")
+KBuffer(const ByteBuffer& buffer, Region::Access access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer")
 : m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access, name))
 {
 }

View file

@@ -489,7 +489,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
 return region->handle_fault(fault, lock);
 }

-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, String name, u8 access, size_t physical_alignment, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, String name, Region::Access access, size_t physical_alignment, Region::Cacheable cacheable)
 {
 VERIFY(!(size % PAGE_SIZE));
 ScopedSpinLock lock(s_mm_lock);
@@ -500,7 +500,7 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
 return allocate_kernel_region_with_vmobject(range.value(), vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u8 access, AllocationStrategy strategy, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
 VERIFY(!(size % PAGE_SIZE));
 ScopedSpinLock lock(s_mm_lock);
@@ -513,7 +513,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u
 return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
 VERIFY(!(size % PAGE_SIZE));
 ScopedSpinLock lock(s_mm_lock);
@@ -526,7 +526,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
 return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
 VERIFY(!(size % PAGE_SIZE));
 ScopedSpinLock lock(s_mm_lock);
@@ -539,7 +539,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
 return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, u8 access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, Region::Access access, Region::Cacheable cacheable)
 {
 ScopedSpinLock lock(s_mm_lock);
 auto region = Region::create_kernel_only(range, vmobject, 0, move(name), access, cacheable);
@@ -548,7 +548,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range&
 return region;
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, String name, u8 access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, String name, Region::Access access, Region::Cacheable cacheable)
 {
 VERIFY(!(size % PAGE_SIZE));
 ScopedSpinLock lock(s_mm_lock);
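For illustration only, a hypothetical call site of the retyped allocate_kernel_region (a fragment that assumes kernel context; the region name is made up): the access argument is now built from Region::Access values using the bitwise operators added in Region.h below.

// Hypothetical caller; not part of this diff.
auto region = MM.allocate_kernel_region(
    PAGE_SIZE,
    "Example Buffer",
    Region::Access::Read | Region::Access::Write,
    AllocationStrategy::Reserve);
if (!region) {
    // Allocation failed: callers receive a null OwnPtr<Region> and handle it here.
}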

View file

@@ -163,12 +163,12 @@ public:
 void deallocate_user_physical_page(const PhysicalPage&);
 void deallocate_supervisor_physical_page(const PhysicalPage&);

-OwnPtr<Region> allocate_contiguous_kernel_region(size_t, String name, u8 access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
-OwnPtr<Region> allocate_kernel_region(size_t, String name, u8 access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
-OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
-OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
-OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
-OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_contiguous_kernel_region(size_t, String name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_kernel_region(size_t, String name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, String name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, String name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, String name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, String name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);

 unsigned user_physical_pages() const { return m_user_physical_pages; }
 unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }

View file

@@ -39,7 +39,7 @@
 namespace Kernel {

-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, Region::Access access, Cacheable cacheable, bool shared)
 : PurgeablePageRanges(vmobject)
 , m_range(range)
 , m_offset_in_vmobject(offset_in_vmobject)
@@ -104,7 +104,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
 // Create a new region backed by the same VMObject.
 auto region = Region::create_user_accessible(
-&new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
+&new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
 if (m_vmobject->is_anonymous())
 region->copy_purgeable_page_ranges(*this);
 region->set_mmap(m_mmap);
@@ -123,7 +123,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
 // Set up a COW region. The parent (this) region becomes COW as well!
 remap();
 auto clone_region = Region::create_user_accessible(
-&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
+&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
 if (m_vmobject->is_anonymous())
 clone_region->copy_purgeable_page_ranges(*this);
 if (m_stack) {
@@ -228,7 +228,7 @@ size_t Region::amount_shared() const
 return bytes;
 }

-NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
+NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, Region::Access access, Cacheable cacheable, bool shared)
 {
 auto region = adopt_own(*new Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 if (owner)
@@ -236,7 +236,7 @@ NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range
 return region;
 }

-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, Region::Access access, Cacheable cacheable)
 {
 return adopt_own(*new Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
 }

View file

@@ -26,6 +26,7 @@
 #pragma once

+#include <AK/EnumBits.h>
 #include <AK/InlineLinkedList.h>
 #include <AK/String.h>
 #include <AK/WeakPtr.h>
@@ -56,6 +57,7 @@ class Region final
 MAKE_SLAB_ALLOCATED(Region)
 public:
 enum Access : u8 {
+None = 0,
 Read = 1,
 Write = 2,
 Execute = 4,
@@ -69,8 +71,8 @@ public:
 Yes,
 };

-static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable, bool shared);
-static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable = Cacheable::Yes);
+static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, Region::Access access, Cacheable, bool shared);
+static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, Region::Access access, Cacheable = Cacheable::Yes);

 ~Region();
@@ -87,7 +89,7 @@ public:
 bool is_cacheable() const { return m_cacheable; }
 const String& name() const { return m_name; }
-unsigned access() const { return m_access; }
+Region::Access access() const { return static_cast<Region::Access>(m_access); }
 void set_name(String name) { m_name = move(name); }
@@ -249,7 +251,7 @@ public:
 void set_syscall_region(bool b) { m_syscall_region = b; }

 private:
-Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String, u8 access, Cacheable, bool shared);
+Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String, Region::Access access, Cacheable, bool shared);

 bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);
@@ -278,7 +280,7 @@ private:
 size_t m_offset_in_vmobject { 0 };
 NonnullRefPtr<VMObject> m_vmobject;
 String m_name;
-u8 m_access { 0 };
+u8 m_access { Region::None };
 bool m_shared : 1 { false };
 bool m_cacheable : 1 { false };
 bool m_stack : 1 { false };
@@ -287,9 +289,11 @@ private:
 WeakPtr<Process> m_owner;
 };

-inline unsigned prot_to_region_access_flags(int prot)
+AK_ENUM_BITWISE_OPERATORS(Region::Access)
+
+inline Region::Access prot_to_region_access_flags(int prot)
 {
-unsigned access = 0;
+Region::Access access = Region::Access::None;
 if (prot & PROT_READ)
 access |= Region::Access::Read;
 if (prot & PROT_WRITE)
@@ -299,7 +303,7 @@ inline unsigned prot_to_region_access_flags(int prot)
 return access;
 }

-inline int region_access_flags_to_prot(unsigned access)
+inline int region_access_flags_to_prot(Region::Access access)
 {
 int prot = 0;
 if (access & Region::Access::Read)
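For context, AK_ENUM_BITWISE_OPERATORS(Region::Access) is assumed to generate overloads roughly like the fragment below (see AK/EnumBits.h for the real macro); these are what let prot_to_region_access_flags keep using |= and region_access_flags_to_prot keep using & while the values stay typed as Region::Access.

// Rough approximation of the generated operators; illustrative fragment only,
// not the actual macro expansion.
constexpr Region::Access operator|(Region::Access a, Region::Access b)
{
    return static_cast<Region::Access>(static_cast<u8>(a) | static_cast<u8>(b));
}
constexpr Region::Access operator&(Region::Access a, Region::Access b)
{
    return static_cast<Region::Access>(static_cast<u8>(a) & static_cast<u8>(b));
}
inline Region::Access& operator|=(Region::Access& a, Region::Access b)
{
    a = a | b;
    return a;
}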

View file

@@ -44,7 +44,7 @@ struct TypedMapping {
 };

 template<typename T>
-static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, u8 access = Region::Access::Read)
+static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read)
 {
 TypedMapping<T> table;
 table.region = MM.allocate_kernel_region(paddr.page_base(), page_round_up(length), {}, access);