Kernel: Make Kernel::VMObject allocation functions return KResultOr
This makes for nicer error handling compared to checking whether a RefPtr is null. Additionally, it paves the way for returning different types of errors in the future.
This commit is contained in:
parent 61c0e3ca92
commit 4bfd6e41b9

26 changed files with 194 additions and 122 deletions
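In short, callers now receive a KResultOr instead of a possibly-null RefPtr. A minimal sketch of the before/after calling convention, distilled from the hunks below (illustrative only, not an additional change in this commit):

    // Before: a null RefPtr was the only failure signal, so every caller had to
    // map it to ENOMEM itself:
    //     auto vmobject = AnonymousVMObject::try_create_with_size(size, strategy);
    //     if (!vmobject)
    //         return ENOMEM;
    //
    // After: the allocator reports a specific error code, which the caller simply propagates:
    auto maybe_vmobject = AnonymousVMObject::try_create_with_size(size, strategy);
    if (maybe_vmobject.is_error())
        return maybe_vmobject.error();
    auto vmobject = maybe_vmobject.release_value(); // NonnullRefPtr<AnonymousVMObject>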
@@ -170,10 +170,10 @@ KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_
 KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
-    auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
-    if (!vmobject)
-        return ENOMEM;
-    auto region = Region::try_create_user_accessible(range, vmobject.release_nonnull(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
+    auto maybe_vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    auto region = Region::try_create_user_accessible(range, maybe_vmobject.release_value(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
     if (!region)
         return ENOMEM;
     if (!region->map(page_directory()))
@@ -13,7 +13,7 @@
 
 namespace Kernel::Memory {
 
-RefPtr<VMObject> AnonymousVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
     // We need to acquire our lock so we copy a sane state
     ScopedSpinLock lock(m_lock);
@@ -21,9 +21,11 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     if (is_purgeable() && is_volatile()) {
         // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
         // object, effectively "pre-purging" it in the child process.
-        auto clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
-        if (!clone)
-            return {};
+        auto maybe_clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
+        if (maybe_clone.is_error())
+            return maybe_clone.error();
+
+        auto clone = maybe_clone.release_value();
         clone->m_volatile = true;
         return clone;
     }
@@ -38,7 +40,7 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
 
     auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
     if (!committed_pages.has_value())
-        return {};
+        return ENOMEM;
 
     // Create or replace the committed cow pages. When cloning a previously
     // cloned vmobject, we want to essentially "fork", leaving us and the
@@ -49,11 +51,12 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     auto new_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());
 
     if (!new_shared_committed_cow_pages)
-        return {};
+        return ENOMEM;
 
-    auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, *new_shared_committed_cow_pages));
-    if (!clone)
-        return {};
+    auto maybe_clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, new_shared_committed_cow_pages.release_nonnull()));
+    if (!maybe_clone)
+        return ENOMEM;
+    auto clone = maybe_clone.release_nonnull();
 
     m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
@@ -76,52 +79,58 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     return clone;
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
 {
     auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
-        return {};
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
+        return ENOMEM;
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
 
     auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
     if (!vmobject)
-        return {};
+        return ENOMEM;
 
     vmobject->m_purgeable = true;
-    return vmobject;
+    return vmobject.release_nonnull();
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(physical_pages));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(physical_pages));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
     if (paddr.offset(size) < paddr) {
         dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
-        return nullptr;
+        // Since we can't wrap around yet, let's pretend to OOM.
+        return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, size));
 }
 
 AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
@@ -18,12 +18,12 @@ class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
 
-    static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
-    static RefPtr<AnonymousVMObject> try_create_purgeable_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_physically_contiguous_with_size(size_t);
-    virtual RefPtr<VMObject> try_clone() override;
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
     [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
@@ -706,38 +706,39 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
-    if (!vmobject) {
+    auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
+    if (maybe_vmobject.is_error()) {
         kernel_page_directory().range_allocator().deallocate(range.value());
+        // FIXME: Would be nice to be able to return a KResultOr from here.
         return {};
     }
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vmobject.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
+    if (maybe_vm_object.is_error())
         return {};
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    if (maybe_vm_object.is_error())
         return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
@@ -14,9 +14,9 @@ RefPtr<PrivateInodeVMObject> PrivateInodeVMObject::try_create_with_inode(Inode&
     return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(inode, inode.size()));
 }
 
-RefPtr<VMObject> PrivateInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this));
 }
 
 PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)
@@ -19,7 +19,7 @@ public:
     virtual ~PrivateInodeVMObject() override;
 
     static RefPtr<PrivateInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_private_inode() const override { return true; }
@@ -75,14 +75,14 @@ OwnPtr<Region> Region::clone()
     if (vmobject().is_inode())
         VERIFY(vmobject().is_private_inode());
 
-    auto vmobject_clone = vmobject().try_clone();
-    if (!vmobject_clone)
+    auto maybe_vmobject_clone = vmobject().try_clone();
+    if (maybe_vmobject_clone.is_error())
         return {};
 
     // Set up a COW region. The parent (this) region becomes COW as well!
     remap();
     auto clone_region = Region::try_create_user_accessible(
-        m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
+        m_range, maybe_vmobject_clone.release_value(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
     if (!clone_region) {
         dbgln("Region::clone: Unable to allocate new Region for COW");
         return nullptr;
@@ -10,10 +10,12 @@ namespace Kernel::Memory {
 
 RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
-    auto vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
+    if (maybe_vm_object.is_error()) {
+        // FIXME: Would be nice to be able to return a KResultOr here.
         return {};
-    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(vm_object.release_nonnull(), request, device_block_size));
+    }
+    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
 }
 
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
@@ -21,9 +21,9 @@ RefPtr<SharedInodeVMObject> SharedInodeVMObject::try_create_with_inode(Inode& in
     return vmobject;
 }
 
-RefPtr<VMObject> SharedInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) SharedInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this));
 }
 
 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
@@ -17,7 +17,7 @@ class SharedInodeVMObject final : public InodeVMObject {
 
 public:
     static RefPtr<SharedInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_shared_inode() const override { return true; }
@@ -33,7 +33,7 @@ class VMObject : public RefCounted<VMObject>
 public:
     virtual ~VMObject();
 
-    virtual RefPtr<VMObject> try_clone() = 0;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() = 0;
 
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }
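Note: the adopt_nonnull_ref_or_enomem helper used throughout the new code is not shown in this diff. Judging from its call sites above, it presumably folds the old adopt_ref_if_nonnull + null-check pattern into a single expression; a rough sketch of that assumed behavior (not the actual AK/Kernel implementation):

    // Assumed behavior, inferred from usage in this commit: wrap a freshly
    // allocated pointer, returning ENOMEM if 'new (nothrow)' produced null.
    template<typename T>
    KResultOr<NonnullRefPtr<T>> adopt_nonnull_ref_or_enomem(T* object)
    {
        auto result = adopt_ref_if_nonnull(object); // RefPtr<T>; null on allocation failure
        if (!result)
            return ENOMEM;
        return result.release_nonnull(); // NonnullRefPtr<T> wrapped in KResultOr
    }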