
Kernel: Replace KResult and KResultOr<T> with Error and ErrorOr<T>

We now use AK::Error and AK::ErrorOr<T> in both kernel and userspace!
This was a slightly tedious refactoring that took a long time, so it's
not unlikely that some bugs crept in.

Nevertheless, it does pass basic functionality testing, and it's just
real nice to finally see the same pattern in all contexts. :^)
Author: Andreas Kling
Date:   2021-11-08 00:51:39 +01:00
Commit: 79fa9765ca (parent 7ee10c6926)

262 changed files with 2415 additions and 2600 deletions
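For orientation before the per-file hunks, here is a minimal sketch of the pattern this commit converges on, using AK's ErrorOr<T> and TRY() exactly as they appear in the diff below; parse_digit and demo are hypothetical names for illustration only:

#include <AK/Error.h>

// An errno code converts to AK::Error here, as the hunks below show
// (e.g. "return EINVAL;"), so a fallible function simply returns
// ErrorOr<T> and fails with a plain POSIX code.
ErrorOr<int> parse_digit(char c)
{
    if (c < '0' || c > '9')
        return EINVAL;
    return c - '0';
}

// ErrorOr<void> replaces KResult: success is "return {}" (formerly
// KSuccess), and TRY() either unwraps the value or propagates the
// error to the caller.
ErrorOr<void> demo()
{
    int digit = TRY(parse_digit('7'));
    (void)digit; // use the value; silences unused-variable warnings in this sketch
    return {};
}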

Kernel/Memory/AddressSpace.cpp

@@ -15,7 +15,7 @@
 namespace Kernel::Memory {
 
-KResultOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const* parent)
+ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const* parent)
 {
     auto page_directory = TRY(PageDirectory::try_create_for_userspace(parent ? &parent->page_directory().range_allocator() : nullptr));
     auto space = TRY(adopt_nonnull_own_or_enomem(new (nothrow) AddressSpace(page_directory)));
@@ -32,7 +32,7 @@ AddressSpace::~AddressSpace()
 {
 }
 
-KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
+ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 {
     if (!size)
         return EINVAL;
@@ -49,7 +49,7 @@ KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
         PerformanceManager::add_unmap_perf_event(Process::current(), whole_region->range());
 
         deallocate_region(*whole_region);
-        return KSuccess;
+        return {};
     }
 
     if (auto* old_region = find_region_containing(range_to_unmap)) {
@@ -77,13 +77,13 @@ KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
         PerformanceManager::add_unmap_perf_event(Process::current(), range_to_unmap);
-        return KSuccess;
+        return {};
     }
 
     // Try again while checking multiple regions at a time.
     auto const& regions = find_regions_intersecting(range_to_unmap);
     if (regions.is_empty())
-        return KSuccess;
+        return {};
 
     // Check if any of the regions is not mmap'ed, to not accidentally
     // error out with just half a region map left.
@@ -126,10 +126,10 @@ KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     PerformanceManager::add_unmap_perf_event(Process::current(), range_to_unmap);
-    return KSuccess;
+    return {};
 }
 
-KResultOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+ErrorOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -138,7 +138,7 @@ KResultOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, s
     return page_directory().range_allocator().try_allocate_specific(vaddr, size);
 }
 
-KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
+ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     OwnPtr<KString> region_name;
     if (!source_region.name().is_null())
@@ -158,7 +158,7 @@ KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_
     return region;
 }
 
-KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
+ErrorOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     OwnPtr<KString> region_name;
@@ -170,7 +170,7 @@ KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, Stri
     return add_region(move(region));
 }
 
-KResultOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -264,7 +264,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
     return regions;
 }
 
-KResultOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
+ErrorOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
     SpinlockLocker lock(m_lock);
@@ -274,13 +274,13 @@ KResultOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
+ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
     VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
 
     VERIFY(!remaining_ranges_after_unmap.is_empty());
-    auto try_make_replacement_region = [&](VirtualRange const& new_range) -> KResultOr<Region*> {
+    auto try_make_replacement_region = [&](VirtualRange const& new_range) -> ErrorOr<Region*> {
         VERIFY(old_region_range.contains(new_range));
         size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
         return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);

Kernel/Memory/AddressSpace.h

@@ -18,13 +18,13 @@ namespace Kernel::Memory {
 class AddressSpace {
 public:
-    static KResultOr<NonnullOwnPtr<AddressSpace>> try_create(AddressSpace const* parent);
+    static ErrorOr<NonnullOwnPtr<AddressSpace>> try_create(AddressSpace const* parent);
     ~AddressSpace();
 
     PageDirectory& page_directory() { return *m_page_directory; }
     const PageDirectory& page_directory() const { return *m_page_directory; }
 
-    KResultOr<Region*> add_region(NonnullOwnPtr<Region>);
+    ErrorOr<Region*> add_region(NonnullOwnPtr<Region>);
 
     size_t region_count() const { return m_regions.size(); }
@@ -33,17 +33,17 @@ public:
     void dump_regions();
 
-    KResult unmap_mmap_range(VirtualAddress, size_t);
+    ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);
 
-    KResultOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    ErrorOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
-    KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
-    KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+    ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+    ErrorOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     void deallocate_region(Region& region);
     NonnullOwnPtr<Region> take_region(Region& region);
 
-    KResultOr<Region*> try_allocate_split_region(Region const& source_region, VirtualRange const&, size_t offset_in_vmobject);
-    KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, VirtualRange const&);
+    ErrorOr<Region*> try_allocate_split_region(Region const& source_region, VirtualRange const&, size_t offset_in_vmobject);
+    ErrorOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, VirtualRange const&);
 
     Region* find_region_from_range(VirtualRange const&);
     Region* find_region_containing(VirtualRange const&);

Kernel/Memory/AnonymousVMObject.cpp

@@ -13,7 +13,7 @@
 namespace Kernel::Memory {
 
-KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
+ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
     // We need to acquire our lock so we copy a sane state
     SpinlockLocker lock(m_lock);
@@ -66,7 +66,7 @@ KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
     return clone;
 }
 
-KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
@@ -76,7 +76,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_s
     return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }
 
-KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
 {
     auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
@@ -85,7 +85,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physic
     return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }
 
-KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
@@ -97,12 +97,12 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgea
     return vmobject;
 }
 
-KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
     return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(physical_pages));
 }
 
-KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
     if (paddr.offset(size) < paddr) {
         dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
@@ -182,7 +182,7 @@ size_t AnonymousVMObject::purge()
     return total_pages_purged;
 }
 
-KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
+ErrorOr<void> AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
 {
     VERIFY(is_purgeable());
@@ -190,7 +190,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
     was_purged = m_was_purged;
     if (m_volatile == is_volatile)
-        return KSuccess;
+        return {};
 
     if (is_volatile) {
         // When a VMObject is made volatile, it gives up all of its committed memory.
@@ -210,7 +210,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
         m_was_purged = false;
 
         for_each_region([&](auto& region) { region.remap(); });
-        return KSuccess;
+        return {};
     }
     // When a VMObject is made non-volatile, we try to commit however many pages are not currently available.
     // If that fails, we return false to indicate that memory allocation failed.
@@ -223,7 +223,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
     if (!committed_pages_needed) {
         m_volatile = false;
-        return KSuccess;
+        return {};
     }
 
     m_unused_committed_pages = TRY(MM.commit_user_physical_pages(committed_pages_needed));
@@ -236,7 +236,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
     m_volatile = false;
     m_was_purged = false;
     for_each_region([&](auto& region) { region.remap(); });
-    return KSuccess;
+    return {};
 }
 
 NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
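A hypothetical caller-side sketch of the purgeable API above (use_purgeable is an illustrative name, not from this commit): set_volatile(true, ...) surrenders committed memory, and on the way back to non-volatile the out-parameter reports whether the contents were purged in the meantime:

ErrorOr<void> use_purgeable(AnonymousVMObject& vmobject)
{
    bool was_purged = false;
    TRY(vmobject.set_volatile(true, was_purged));  // volatile: gives up committed memory
    // ... memory pressure may purge the object here ...
    TRY(vmobject.set_volatile(false, was_purged)); // non-volatile: recommits pages; can fail
    if (was_purged) {
        // The kernel discarded the pages while volatile; the caller must regenerate them.
    }
    return {};
}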

Kernel/Memory/AnonymousVMObject.h

@@ -18,12 +18,12 @@ class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
 
-    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
-    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
-    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
-    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
-    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
 
     [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
@@ -34,7 +34,7 @@ public:
     bool is_purgeable() const { return m_purgeable; }
     bool is_volatile() const { return m_volatile; }
 
-    KResult set_volatile(bool is_volatile, bool& was_purged);
+    ErrorOr<void> set_volatile(bool is_volatile, bool& was_purged);
 
     size_t purge();

Kernel/Memory/MemoryManager.cpp

@@ -705,7 +705,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
     return region->handle_fault(fault);
 }
 
-KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
@@ -714,7 +714,7 @@ KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_regio
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
-KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
@@ -723,7 +723,7 @@ KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t si
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
-KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
@@ -732,7 +732,7 @@ KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalA
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
-KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     OwnPtr<KString> name_kstring;
     if (!name.is_null())
@@ -742,7 +742,7 @@ KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmob
     return region;
 }
 
-KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
@@ -750,7 +750,7 @@ KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmob
     return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }
 
-KResultOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
+ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     SpinlockLocker lock(s_mm_lock);

Kernel/Memory/MemoryManager.h

@@ -171,7 +171,7 @@ public:
         Yes
     };
 
-    KResultOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
+    ErrorOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
     void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
 
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
@@ -180,11 +180,11 @@ public:
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
-    KResultOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
-    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
+    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };

Kernel/Memory/PageDirectory.cpp

@@ -42,7 +42,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     return directory;
 }
 
-KResultOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
+ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
 {
     constexpr FlatPtr userspace_range_base = 0x00800000;
     FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;

Kernel/Memory/PageDirectory.h

@@ -20,7 +20,7 @@ class PageDirectory : public RefCounted<PageDirectory> {
     friend class MemoryManager;
 
 public:
-    static KResultOr<NonnullRefPtr<PageDirectory>> try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator = nullptr);
+    static ErrorOr<NonnullRefPtr<PageDirectory>> try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator = nullptr);
     static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
     static RefPtr<PageDirectory> find_by_cr3(FlatPtr);

Kernel/Memory/PrivateInodeVMObject.cpp

@@ -9,12 +9,12 @@
 namespace Kernel::Memory {
 
-KResultOr<NonnullRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
+ErrorOr<NonnullRefPtr<PrivateInodeVMObject>> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
 {
     return adopt_nonnull_ref_or_enomem(new (nothrow) PrivateInodeVMObject(inode, inode.size()));
 }
 
-KResultOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
+ErrorOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
 {
     return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this));
 }

Kernel/Memory/PrivateInodeVMObject.h

@@ -18,8 +18,8 @@ class PrivateInodeVMObject final : public InodeVMObject {
 public:
     virtual ~PrivateInodeVMObject() override;
 
-    static KResultOr<NonnullRefPtr<PrivateInodeVMObject>> try_create_with_inode(Inode&);
-    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
+    static ErrorOr<NonnullRefPtr<PrivateInodeVMObject>> try_create_with_inode(Inode&);
+    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_private_inode() const override { return true; }

Kernel/Memory/Region.cpp

@@ -50,7 +50,7 @@ Region::~Region()
     }
 }
 
-KResultOr<NonnullOwnPtr<Region>> Region::try_clone()
+ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
 {
     VERIFY(Process::has_current());
@@ -144,12 +144,12 @@ size_t Region::amount_shared() const
     return bytes;
 }
 
-KResultOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+ErrorOr<NonnullOwnPtr<Region>> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
     return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
 
-KResultOr<NonnullOwnPtr<Region>> Region::try_create_kernel_only(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> Region::try_create_kernel_only(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
 {
     return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
 }
@@ -253,7 +253,7 @@ void Region::set_page_directory(PageDirectory& page_directory)
     m_page_directory = page_directory;
 }
 
-KResult Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
+ErrorOr<void> Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
 {
     SpinlockLocker page_lock(page_directory.get_lock());
     SpinlockLocker lock(s_mm_lock);
@@ -274,7 +274,7 @@ KResult Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_t
         if (should_flush_tlb == ShouldFlushTLB::Yes)
            MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index);
        if (page_index == page_count())
-            return KSuccess;
+            return {};
    }
    return ENOMEM;
 }

Kernel/Memory/Region.h

@@ -49,8 +49,8 @@ public:
         Yes,
     };
 
-    static KResultOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
-    static KResultOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
+    static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
+    static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
 
     ~Region();
@@ -90,7 +90,7 @@ public:
     PageFaultResponse handle_fault(PageFault const&);
 
-    KResultOr<NonnullOwnPtr<Region>> try_clone();
+    ErrorOr<NonnullOwnPtr<Region>> try_clone();
 
     [[nodiscard]] bool contains(VirtualAddress vaddr) const
     {
@@ -170,7 +170,7 @@ public:
     void set_executable(bool b) { set_access_bit(Access::Execute, b); }
 
     void set_page_directory(PageDirectory&);
-    KResult map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
+    ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
     enum class ShouldDeallocateVirtualRange {
         No,
         Yes,

Kernel/Memory/RingBuffer.cpp

@@ -29,7 +29,7 @@ bool RingBuffer::copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, s
     return true;
 }
 
-KResultOr<size_t> RingBuffer::copy_data_out(size_t size, UserOrKernelBuffer& buffer) const
+ErrorOr<size_t> RingBuffer::copy_data_out(size_t size, UserOrKernelBuffer& buffer) const
 {
     auto start = m_start_of_used % m_capacity_in_bytes;
     auto num_bytes = min(min(m_num_used_bytes, size), m_capacity_in_bytes - start);
@@ -37,7 +37,7 @@ KResultOr<size_t> RingBuffer::copy_data_out(size_t size, UserOrKernelBuffer& buf
     return num_bytes;
 }
 
-KResultOr<PhysicalAddress> RingBuffer::reserve_space(size_t size)
+ErrorOr<PhysicalAddress> RingBuffer::reserve_space(size_t size)
 {
     if (m_capacity_in_bytes < m_num_used_bytes + size)
         return ENOSPC;

Kernel/Memory/RingBuffer.h

@@ -18,8 +18,8 @@ public:
     bool has_space() const { return m_num_used_bytes < m_capacity_in_bytes; }
     bool copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied);
-    KResultOr<size_t> copy_data_out(size_t size, UserOrKernelBuffer& buffer) const;
-    KResultOr<PhysicalAddress> reserve_space(size_t size);
+    ErrorOr<size_t> copy_data_out(size_t size, UserOrKernelBuffer& buffer) const;
+    ErrorOr<PhysicalAddress> reserve_space(size_t size);
     void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);
 
     PhysicalAddress start_of_used() const;

Kernel/Memory/ScatterGatherList.cpp

@@ -12,7 +12,7 @@ RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest&
 {
     auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
     if (maybe_vm_object.is_error()) {
-        // FIXME: Would be nice to be able to return a KResultOr here.
+        // FIXME: Would be nice to be able to return an ErrorOr here.
         return {};
     }
     return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
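The FIXME above points at a natural follow-up: if try_create itself returned ErrorOr, the allocation failure could be propagated instead of being collapsed into a null RefPtr. A hypothetical sketch, not part of this commit (the parameter names are inferred from the constructor call above):

static ErrorOr<NonnullRefPtr<ScatterGatherList>> try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
{
    // TRY() forwards the VMObject allocation error to the caller,
    // and adopt_nonnull_ref_or_enomem turns a failed "new" into ENOMEM.
    auto vm_object = TRY(AnonymousVMObject::try_create_with_physical_pages(allocated_pages));
    return adopt_nonnull_ref_or_enomem(new (nothrow) ScatterGatherList(move(vm_object), request, device_block_size));
}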

Kernel/Memory/SharedInodeVMObject.cpp

@@ -9,7 +9,7 @@
 namespace Kernel::Memory {
 
-KResultOr<NonnullRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
+ErrorOr<NonnullRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_with_inode(Inode& inode)
 {
     size_t size = inode.size();
     if (auto shared_vmobject = inode.shared_vmobject())
@@ -19,7 +19,7 @@ KResultOr<NonnullRefPtr<SharedInodeVMObject>> SharedInodeVMObject::try_create_wi
     return vmobject;
 }
 
-KResultOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
+ErrorOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
     return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this));
 }

Kernel/Memory/SharedInodeVMObject.h

@@ -16,8 +16,8 @@ class SharedInodeVMObject final : public InodeVMObject {
     AK_MAKE_NONMOVABLE(SharedInodeVMObject);
 
 public:
-    static KResultOr<NonnullRefPtr<SharedInodeVMObject>> try_create_with_inode(Inode&);
-    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
+    static ErrorOr<NonnullRefPtr<SharedInodeVMObject>> try_create_with_inode(Inode&);
+    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_shared_inode() const override { return true; }

Kernel/Memory/VMObject.h

@@ -26,7 +26,7 @@ class VMObject
 public:
     virtual ~VMObject();
 
-    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() = 0;
+    virtual ErrorOr<NonnullRefPtr<VMObject>> try_clone() = 0;
 
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }

Kernel/Memory/VirtualRange.cpp

@@ -36,7 +36,7 @@ VirtualRange VirtualRange::intersect(VirtualRange const& other) const
     return VirtualRange(new_base, (new_end - new_base).get());
 }
 
-KResultOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
+ErrorOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (page_round_up_would_wrap(size))
         return EINVAL;

Kernel/Memory/VirtualRange.h

@@ -7,7 +7,7 @@
 #pragma once
 
-#include <Kernel/API/KResult.h>
+#include <AK/Error.h>
 #include <Kernel/VirtualAddress.h>
 
 namespace Kernel::Memory {
@@ -51,7 +51,7 @@ public:
     Vector<VirtualRange, 2> carve(VirtualRange const&) const;
     VirtualRange intersect(VirtualRange const&) const;
 
-    static KResultOr<VirtualRange> expand_to_page_boundaries(FlatPtr address, size_t size);
+    static ErrorOr<VirtualRange> expand_to_page_boundaries(FlatPtr address, size_t size);
 
 private:
     VirtualAddress m_base;

Kernel/Memory/VirtualRangeAllocator.cpp

@@ -56,7 +56,7 @@ void VirtualRangeAllocator::carve_from_region(VirtualRange const& from, VirtualR
     }
 }
 
-KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
+ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
 {
     if (!size)
         return EINVAL;
@@ -80,7 +80,7 @@ KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t si
     return try_allocate_anywhere(size, alignment);
 }
 
-KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
+ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return EINVAL;
@@ -129,7 +129,7 @@ KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size
     return ENOMEM;
 }
 
-KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
+ErrorOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return EINVAL;

Kernel/Memory/VirtualRangeAllocator.h

@@ -21,9 +21,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(VirtualRangeAllocator const&);
 
-    KResultOr<VirtualRange> try_allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    KResultOr<VirtualRange> try_allocate_specific(VirtualAddress, size_t);
-    KResultOr<VirtualRange> try_allocate_randomized(size_t, size_t alignment);
+    ErrorOr<VirtualRange> try_allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    ErrorOr<VirtualRange> try_allocate_specific(VirtualAddress, size_t);
+    ErrorOr<VirtualRange> try_allocate_randomized(size_t, size_t alignment);
 
     void deallocate(VirtualRange const&);
     void dump() const;