1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-24 14:07:43 +00:00

Kernel: Convert Region to east-const style

This commit is contained in:
Andreas Kling 2021-07-22 14:25:41 +02:00
parent bf16591c07
commit e44a41d0bf
2 changed files with 15 additions and 15 deletions

View file

@@ -19,7 +19,7 @@
namespace Kernel { namespace Kernel {
Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared) Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
: PurgeablePageRanges(vmobject) : PurgeablePageRanges(vmobject)
, m_range(range) , m_range(range)
, m_offset_in_vmobject(offset_in_vmobject) , m_offset_in_vmobject(offset_in_vmobject)
@@ -184,14 +184,14 @@ size_t Region::cow_pages() const
{ {
if (!vmobject().is_anonymous()) if (!vmobject().is_anonymous())
return 0; return 0;
return static_cast<const AnonymousVMObject&>(vmobject()).cow_pages(); return static_cast<AnonymousVMObject const&>(vmobject()).cow_pages();
} }
size_t Region::amount_dirty() const size_t Region::amount_dirty() const
{ {
if (!vmobject().is_inode()) if (!vmobject().is_inode())
return amount_resident(); return amount_resident();
return static_cast<const InodeVMObject&>(vmobject()).amount_dirty(); return static_cast<InodeVMObject const&>(vmobject()).amount_dirty();
} }
size_t Region::amount_resident() const size_t Region::amount_resident() const
@@ -216,7 +216,7 @@ size_t Region::amount_shared() const
return bytes; return bytes;
} }
OwnPtr<Region> Region::try_create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared) OwnPtr<Region> Region::try_create_user_accessible(Process* owner, Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
{ {
auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared)); auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
if (!region) if (!region)
@@ -226,7 +226,7 @@ OwnPtr<Region> Region::try_create_user_accessible(Process* owner, const Range& r
return region; return region;
} }
OwnPtr<Region> Region::try_create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable) OwnPtr<Region> Region::try_create_kernel_only(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
{ {
return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false)); return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
} }
@@ -235,7 +235,7 @@ bool Region::should_cow(size_t page_index) const
{ {
if (!vmobject().is_anonymous()) if (!vmobject().is_anonymous())
return false; return false;
return static_cast<const AnonymousVMObject&>(vmobject()).should_cow(first_page_index() + page_index, m_shared); return static_cast<AnonymousVMObject const&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
} }
void Region::set_should_cow(size_t page_index, bool cow) void Region::set_should_cow(size_t page_index, bool cow)
@@ -406,7 +406,7 @@ void Region::remap()
map(*m_page_directory); map(*m_page_directory);
} }
PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<RecursiveSpinLock>& mm_lock) PageFaultResponse Region::handle_fault(PageFault const& fault, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
{ {
auto page_index_in_region = page_index_from_address(fault.vaddr()); auto page_index_in_region = page_index_from_address(fault.vaddr());
if (fault.type() == PageFault::Type::PageNotPresent) { if (fault.type() == PageFault::Type::PageNotPresent) {

View file

@@ -49,12 +49,12 @@ public:
Yes, Yes,
}; };
static OwnPtr<Region> try_create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared); static OwnPtr<Region> try_create_user_accessible(Process*, Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
static OwnPtr<Region> try_create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes); static OwnPtr<Region> try_create_kernel_only(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
~Region(); ~Region();
const Range& range() const { return m_range; } Range const& range() const { return m_range; }
VirtualAddress vaddr() const { return m_range.base(); } VirtualAddress vaddr() const { return m_range.base(); }
size_t size() const { return m_range.size(); } size_t size() const { return m_range.size(); }
bool is_readable() const { return m_access & Access::Read; } bool is_readable() const { return m_access & Access::Read; }
@@ -72,7 +72,7 @@ public:
void set_name(OwnPtr<KString> name) { m_name = move(name); } void set_name(OwnPtr<KString> name) { m_name = move(name); }
const VMObject& vmobject() const { return *m_vmobject; } VMObject const& vmobject() const { return *m_vmobject; }
VMObject& vmobject() { return *m_vmobject; } VMObject& vmobject() { return *m_vmobject; }
void set_vmobject(NonnullRefPtr<VMObject>&&); void set_vmobject(NonnullRefPtr<VMObject>&&);
@@ -88,7 +88,7 @@ public:
bool is_user() const { return !is_kernel(); } bool is_user() const { return !is_kernel(); }
bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_base; } bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_base; }
PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&); PageFaultResponse handle_fault(PageFault const&, ScopedSpinLock<RecursiveSpinLock>&);
OwnPtr<Region> clone(Process&); OwnPtr<Region> clone(Process&);
@@ -97,7 +97,7 @@ public:
return m_range.contains(vaddr); return m_range.contains(vaddr);
} }
bool contains(const Range& range) const bool contains(Range const& range) const
{ {
return m_range.contains(range); return m_range.contains(range);
} }
@@ -165,7 +165,7 @@ public:
return size() / PAGE_SIZE; return size() / PAGE_SIZE;
} }
const PhysicalPage* physical_page(size_t index) const PhysicalPage const* physical_page(size_t index) const
{ {
VERIFY(index < page_count()); VERIFY(index < page_count());
return vmobject().physical_pages()[first_page_index() + index]; return vmobject().physical_pages()[first_page_index() + index];
@@ -226,7 +226,7 @@ public:
void set_syscall_region(bool b) { m_syscall_region = b; } void set_syscall_region(bool b) { m_syscall_region = b; }
private: private:
Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared); Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
bool do_remap_vmobject_page_range(size_t page_index, size_t page_count); bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);