
Kernel: Make Kernel::VMObject allocation functions return KResultOr

This makes for nicer error handling than checking whether a RefPtr is
null. It also paves the way for returning different kinds of errors in
the future.
Authored by sin-ack on 2021-08-15 09:07:59 +00:00; committed by Andreas Kling
parent 61c0e3ca92
commit 4bfd6e41b9
26 changed files with 194 additions and 122 deletions
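To make the change concrete, here is a minimal before/after sketch of the calling pattern this commit converts callers to. It is illustrative only: the allocate_example_*() helpers and use_vmobject() are hypothetical stand-ins, not code from this diff.

// Before: allocation failure is signalled by a null RefPtr, so the caller
// can only report a generic ENOMEM, whatever actually went wrong.
KResult allocate_example_before()
{
    auto vmobject = Memory::AnonymousVMObject::try_create_with_size(PAGE_SIZE, AllocationStrategy::AllocateNow);
    if (!vmobject)
        return ENOMEM;
    use_vmobject(vmobject.release_nonnull());
    return KSuccess;
}

// After: try_create_with_size() returns KResultOr<NonnullRefPtr<AnonymousVMObject>>,
// so the callee's actual error code propagates to the caller unchanged.
KResult allocate_example_after()
{
    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_size(PAGE_SIZE, AllocationStrategy::AllocateNow);
    if (maybe_vmobject.is_error())
        return maybe_vmobject.error();
    use_vmobject(maybe_vmobject.release_value());
    return KSuccess;
}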


@@ -119,13 +119,17 @@ KResult UHCIController::reset()
     }
     // Let's allocate the physical page for the Frame List (which is 4KiB aligned)
-    auto framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
-    m_framelist = MM.allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
+    auto maybe_framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+    if (maybe_framelist_vmobj.is_error())
+        return maybe_framelist_vmobj.error();
+    m_framelist = MM.allocate_kernel_region_with_vmobject(maybe_framelist_vmobj.release_value(), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
     dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
     dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
     write_sofmod(64); // 1mS frame time
-    create_structures();
+    if (auto result = create_structures(); result.is_error())
+        return result;
     setup_schedule();
     write_flbaseadd(m_framelist->physical_page(0)->paddr().get()); // Frame list (physical) address
@@ -139,12 +143,15 @@ KResult UHCIController::reset()
     return KSuccess;
 }
-UNMAP_AFTER_INIT void UHCIController::create_structures()
+UNMAP_AFTER_INIT KResult UHCIController::create_structures()
 {
     // Let's allocate memory for both the QH and TD pools
     // First the QH pool and all of the Interrupt QH's
-    auto qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_qh_pool = MM.allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
+    auto maybe_qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    if (maybe_qh_pool_vmobject.is_error())
+        return maybe_qh_pool_vmobject.error();
+    m_qh_pool = MM.allocate_kernel_region_with_vmobject(maybe_qh_pool_vmobject.release_value(), 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
     memset(m_qh_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE); // Zero out both pages
     // Let's populate our free qh list (so we have some we can allocate later on)
@@ -163,8 +170,10 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
     m_dummy_qh = allocate_queue_head();
     // Now the Transfer Descriptor pool
-    auto td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_td_pool = MM.allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
+    auto maybe_td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    if (maybe_td_pool_vmobject.is_error())
+        return maybe_td_pool_vmobject.error();
+    m_td_pool = MM.allocate_kernel_region_with_vmobject(maybe_td_pool_vmobject.release_value(), 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
     memset(m_td_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE);
     // Set up the Isochronous Transfer Descriptor list
@@ -209,6 +218,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
         dbgln(" qh_pool: {}, length: {}", PhysicalAddress(m_qh_pool->physical_page(0)->paddr()), m_qh_pool->range().size());
         dbgln(" td_pool: {}, length: {}", PhysicalAddress(m_td_pool->physical_page(0)->paddr()), m_td_pool->range().size());
     }
+    return KSuccess;
 }
 UNMAP_AFTER_INIT void UHCIController::setup_schedule()


@@ -69,7 +69,7 @@ private:
     virtual bool handle_irq(const RegisterState&) override;
-    void create_structures();
+    KResult create_structures();
     void setup_schedule();
     size_t poll_transfer_queue(QueueHead& transfer_queue);


@@ -28,10 +28,11 @@ KResult KCOVInstance::buffer_allocate(size_t buffer_size_in_entries)
     // - we allocate one kernel region using that vmobject
     // - when an mmap call comes in, we allocate another userspace region,
     //   backed by the same vmobject
-    this->vmobject = Memory::AnonymousVMObject::try_create_with_size(
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_size(
         this->m_buffer_size_in_bytes, AllocationStrategy::AllocateNow);
-    if (!this->vmobject)
-        return ENOMEM;
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    this->vmobject = maybe_vmobject.release_value();
     this->m_kernel_region = MM.allocate_kernel_region_with_vmobject(
         *this->vmobject, this->m_buffer_size_in_bytes, String::formatted("kcov_{}", this->m_pid),


@@ -47,13 +47,14 @@ KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&
         return EINVAL;
     }
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
-    if (!vmobject)
-        return ENOMEM;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
     dbgln("MemoryDevice: Mapped physical memory at {} for range of {} bytes", viewed_address, range.size());
     return process.address_space().allocate_region_with_vmobject(
         range,
-        vmobject.release_nonnull(),
+        maybe_vmobject.release_value(),
         0,
         "Mapped Physical Memory",
         prot,


@@ -238,10 +238,11 @@ KResultOr<size_t> SB16::write(FileDescription&, u64, const UserOrKernelBuffer& d
         if (!page)
            return ENOMEM;
         auto nonnull_page = page.release_nonnull();
-        auto vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
-        if (!vmobject)
-            return ENOMEM;
-        m_dma_region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
+        auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
+        if (maybe_vmobject.is_error())
+            return maybe_vmobject.error();
+        m_dma_region = MM.allocate_kernel_region_with_vmobject(maybe_vmobject.release_value(), PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
         if (!m_dma_region)
             return ENOMEM;
     }


@@ -88,7 +88,8 @@ UNMAP_AFTER_INIT void BochsGraphicsAdapter::initialize_framebuffer_devices()
 {
     // FIXME: Find a better way to determine default resolution...
     m_framebuffer_device = FramebufferDevice::create(*this, 0, PhysicalAddress(PCI::get_BAR0(pci_address()) & 0xfffffff0), 1024, 768, 1024 * sizeof(u32));
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return a KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 GraphicsDevice::Type BochsGraphicsAdapter::type() const


@@ -36,18 +36,20 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
     if (range.size() != Memory::page_round_up(framebuffer_size_in_bytes()))
         return EOVERFLOW;
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    if (!vmobject)
-        return ENOMEM;
-    m_userspace_real_framebuffer_vmobject = vmobject;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    m_userspace_real_framebuffer_vmobject = maybe_vmobject.release_value();
-    m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    if (!m_real_framebuffer_vmobject)
-        return ENOMEM;
+    auto maybe_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_real_framebuffer_vmobject.is_error())
+        return maybe_real_framebuffer_vmobject.error();
+    m_real_framebuffer_vmobject = maybe_real_framebuffer_vmobject.release_value();
-    m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
-    if (!m_swapped_framebuffer_vmobject)
-        return ENOMEM;
+    auto maybe_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+    if (maybe_swapped_framebuffer_vmobject.is_error())
+        return maybe_swapped_framebuffer_vmobject.error();
+    m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
     m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
     if (!m_real_framebuffer_region)
@@ -107,16 +109,29 @@ String FramebufferDevice::device_name() const
     return String::formatted("fb{}", minor());
 }
-UNMAP_AFTER_INIT void FramebufferDevice::initialize()
+UNMAP_AFTER_INIT KResult FramebufferDevice::initialize()
 {
-    m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    VERIFY(m_real_framebuffer_vmobject);
+    // FIXME: Would be nice to be able to unify this with mmap above, but this
+    // function is UNMAP_AFTER_INIT for the time being.
+    auto maybe_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_real_framebuffer_vmobject.is_error())
+        return maybe_real_framebuffer_vmobject.error();
+    m_real_framebuffer_vmobject = maybe_real_framebuffer_vmobject.release_value();
+    auto maybe_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+    if (maybe_swapped_framebuffer_vmobject.is_error())
+        return maybe_swapped_framebuffer_vmobject.error();
+    m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
     m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
-    VERIFY(m_real_framebuffer_region);
-    m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
-    VERIFY(m_swapped_framebuffer_vmobject);
+    if (!m_real_framebuffer_region)
+        return ENOMEM;
     m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite);
-    VERIFY(m_swapped_framebuffer_region);
+    if (!m_swapped_framebuffer_region)
+        return ENOMEM;
+    return KSuccess;
 }
 UNMAP_AFTER_INIT FramebufferDevice::FramebufferDevice(const GraphicsDevice& adapter, size_t output_port_index, PhysicalAddress addr, size_t width, size_t height, size_t pitch)


@@ -34,7 +34,7 @@ public:
     size_t framebuffer_size_in_bytes() const;
     virtual ~FramebufferDevice() {};
-    void initialize();
+    KResult initialize();
 private:
     // ^File


@@ -639,6 +639,7 @@ void IntelNativeGraphicsAdapter::initialize_framebuffer_devices()
     VERIFY(m_framebuffer_height != 0);
     VERIFY(m_framebuffer_width != 0);
     m_framebuffer_device = FramebufferDevice::create(*this, 0, address, m_framebuffer_width, m_framebuffer_height, m_framebuffer_pitch);
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return a KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 }


@@ -33,7 +33,8 @@ UNMAP_AFTER_INIT void VGACompatibleAdapter::initialize_framebuffer_devices()
     VERIFY(m_framebuffer_height != 0);
     VERIFY(m_framebuffer_pitch != 0);
     m_framebuffer_device = FramebufferDevice::create(*this, 0, m_framebuffer_address, m_framebuffer_width, m_framebuffer_height, m_framebuffer_pitch);
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 UNMAP_AFTER_INIT VGACompatibleAdapter::VGACompatibleAdapter(PCI::Address address)


@@ -15,15 +15,18 @@ FrameBufferDevice::FrameBufferDevice(GPU& virtio_gpu, ScanoutID scanout)
     , m_gpu(virtio_gpu)
     , m_scanout(scanout)
 {
-    if (display_info().enabled)
-        create_framebuffer();
+    if (display_info().enabled) {
+        // FIXME: This should be in a place where we can handle allocation failures.
+        auto result = create_framebuffer();
+        VERIFY(!result.is_error());
+    }
 }
 FrameBufferDevice::~FrameBufferDevice()
 {
 }
-void FrameBufferDevice::create_framebuffer()
+KResult FrameBufferDevice::create_framebuffer()
 {
     // First delete any existing framebuffers to free the memory first
     m_framebuffer = nullptr;
@@ -40,12 +43,17 @@ void FrameBufferDevice::create_framebuffer()
     for (auto i = 0u; i < num_needed_pages; ++i) {
         pages.append(write_sink_page);
     }
-    m_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
+    auto maybe_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
+    if (maybe_framebuffer_sink_vmobject.is_error())
+        return maybe_framebuffer_sink_vmobject.error();
+    m_framebuffer_sink_vmobject = maybe_framebuffer_sink_vmobject.release_value();
     MutexLocker locker(m_gpu.operation_lock());
     m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
     create_buffer(m_main_buffer, 0, m_buffer_size);
     create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);
+    return KSuccess;
 }
 void FrameBufferDevice::create_buffer(Buffer& buffer, size_t framebuffer_offset, size_t framebuffer_size)
@@ -124,7 +132,10 @@ bool FrameBufferDevice::try_to_set_resolution(size_t width, size_t height)
         .width = (u32)width,
         .height = (u32)height,
     };
-    create_framebuffer();
+    // FIXME: Would be nice to be able to return KResultOr here.
+    if (auto result = create_framebuffer(); result.is_error())
+        return false;
     return true;
 }
@@ -255,9 +266,18 @@ KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescrip
     if (m_userspace_mmap_region)
         return ENOMEM;
-    auto vmobject = m_are_writes_active ? m_framebuffer->vmobject().try_clone() : m_framebuffer_sink_vmobject;
+    RefPtr<Memory::VMObject> vmobject;
+    if (m_are_writes_active) {
+        auto maybe_vmobject = m_framebuffer->vmobject().try_clone();
+        if (maybe_vmobject.is_error())
+            return maybe_vmobject.error();
+        vmobject = maybe_vmobject.release_value();
+    } else {
+        vmobject = m_framebuffer_sink_vmobject;
+        if (vmobject.is_null())
+            return ENOMEM;
+    }
     auto result = process.address_space().allocate_region_with_vmobject(
         range,
@@ -277,9 +297,10 @@ void FrameBufferDevice::deactivate_writes()
     m_are_writes_active = false;
     if (m_userspace_mmap_region) {
         auto* region = m_userspace_mmap_region.unsafe_ptr();
-        auto vm_object = m_framebuffer_sink_vmobject->try_clone();
-        VERIFY(vm_object);
-        region->set_vmobject(vm_object.release_nonnull());
+        auto maybe_vm_object = m_framebuffer_sink_vmobject->try_clone();
+        // FIXME: Would be nice to be able to return a KResult here.
+        VERIFY(!maybe_vm_object.is_error());
+        region->set_vmobject(maybe_vm_object.release_value());
         region->remap();
     }
     set_buffer(0);


@@ -56,7 +56,7 @@ private:
     Protocol::DisplayInfoResponse::Display const& display_info() const;
     Protocol::DisplayInfoResponse::Display& display_info();
-    void create_framebuffer();
+    KResult create_framebuffer();
     void create_buffer(Buffer&, size_t, size_t);
     void set_buffer(int);


@@ -277,11 +277,13 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
 UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
 {
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    VERIFY(vmobject);
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    // FIXME: Would be nice to be able to return a KResultOr from here.
+    VERIFY(!maybe_vmobject.is_error());
     auto region = MM.allocate_kernel_region_with_vmobject(
         Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
-        vmobject.release_nonnull(),
+        maybe_vmobject.release_value(),
         {},
         Memory::Region::Access::ReadWriteExecute);
     VERIFY(region);


@@ -170,10 +170,10 @@ KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_
 KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
-    auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
-    if (!vmobject)
-        return ENOMEM;
-    auto region = Region::try_create_user_accessible(range, vmobject.release_nonnull(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
+    auto maybe_vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    auto region = Region::try_create_user_accessible(range, maybe_vmobject.release_value(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
     if (!region)
         return ENOMEM;
     if (!region->map(page_directory()))


@@ -13,7 +13,7 @@
 namespace Kernel::Memory {
-RefPtr<VMObject> AnonymousVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
     // We need to acquire our lock so we copy a sane state
     ScopedSpinLock lock(m_lock);
@@ -21,9 +21,11 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     if (is_purgeable() && is_volatile()) {
         // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
         // object, effectively "pre-purging" it in the child process.
-        auto clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
-        if (!clone)
-            return {};
+        auto maybe_clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
+        if (maybe_clone.is_error())
+            return maybe_clone.error();
+        auto clone = maybe_clone.release_value();
         clone->m_volatile = true;
         return clone;
     }
@@ -38,7 +40,7 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
     if (!committed_pages.has_value())
-        return {};
+        return ENOMEM;
     // Create or replace the committed cow pages. When cloning a previously
     // cloned vmobject, we want to essentially "fork", leaving us and the
@@ -49,11 +51,12 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     auto new_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());
     if (!new_shared_committed_cow_pages)
-        return {};
+        return ENOMEM;
-    auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, *new_shared_committed_cow_pages));
-    if (!clone)
-        return {};
+    auto maybe_clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, new_shared_committed_cow_pages.release_nonnull()));
+    if (!maybe_clone)
+        return ENOMEM;
+    auto clone = maybe_clone.release_nonnull();
     m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
@@ -76,52 +79,58 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     return clone;
 }
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
 {
     auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
-        return {};
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
+        return ENOMEM;
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
     auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
     if (!vmobject)
-        return {};
+        return ENOMEM;
     vmobject->m_purgeable = true;
-    return vmobject;
+    return vmobject.release_nonnull();
 }
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(physical_pages));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(physical_pages));
 }
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
     if (paddr.offset(size) < paddr) {
         dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
-        return nullptr;
+        // Since we can't wrap around yet, let's pretend to OOM.
+        return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, size));
 }
 AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)


@@ -18,12 +18,12 @@ class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
-    static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
-    static RefPtr<AnonymousVMObject> try_create_purgeable_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_physically_contiguous_with_size(size_t);
-    virtual RefPtr<VMObject> try_clone() override;
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
     [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);


@@ -706,38 +706,39 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
-    if (!vmobject) {
+    auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
+    if (maybe_vmobject.is_error()) {
         kernel_page_directory().range_allocator().deallocate(range.value());
+        // FIXME: Would be nice to be able to return a KResultOr from here.
         return {};
     }
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vmobject.release_value(), name, access, cacheable);
 }
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
+    if (maybe_vm_object.is_error())
         return {};
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    if (maybe_vm_object.is_error())
         return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)


@@ -14,9 +14,9 @@ RefPtr<PrivateInodeVMObject> PrivateInodeVMObject::try_create_with_inode(Inode&
     return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(inode, inode.size()));
 }
-RefPtr<VMObject> PrivateInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) PrivateInodeVMObject(*this));
 }
 PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)


@@ -19,7 +19,7 @@ public:
     virtual ~PrivateInodeVMObject() override;
     static RefPtr<PrivateInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 private:
     virtual bool is_private_inode() const override { return true; }


@@ -75,14 +75,14 @@ OwnPtr<Region> Region::clone()
     if (vmobject().is_inode())
         VERIFY(vmobject().is_private_inode());
-    auto vmobject_clone = vmobject().try_clone();
-    if (!vmobject_clone)
+    auto maybe_vmobject_clone = vmobject().try_clone();
+    if (maybe_vmobject_clone.is_error())
         return {};
     // Set up a COW region. The parent (this) region becomes COW as well!
     remap();
     auto clone_region = Region::try_create_user_accessible(
-        m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
+        m_range, maybe_vmobject_clone.release_value(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
     if (!clone_region) {
         dbgln("Region::clone: Unable to allocate new Region for COW");
         return nullptr;


@@ -10,10 +10,12 @@ namespace Kernel::Memory {
 RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
-    auto vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
+    if (maybe_vm_object.is_error()) {
+        // FIXME: Would be nice to be able to return a KResultOr here.
         return {};
-    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(vm_object.release_nonnull(), request, device_block_size));
+    }
+    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
 }
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)


@@ -21,9 +21,9 @@ RefPtr<SharedInodeVMObject> SharedInodeVMObject::try_create_with_inode(Inode& in
     return vmobject;
 }
-RefPtr<VMObject> SharedInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) SharedInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem<VMObject>(new (nothrow) SharedInodeVMObject(*this));
 }
 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)


@@ -17,7 +17,7 @@ class SharedInodeVMObject final : public InodeVMObject {
 public:
     static RefPtr<SharedInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 private:
     virtual bool is_shared_inode() const override { return true; }


@@ -33,7 +33,7 @@ class VMObject : public RefCounted<VMObject>
 public:
     virtual ~VMObject();
-    virtual RefPtr<VMObject> try_clone() = 0;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() = 0;
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }


@@ -29,11 +29,11 @@ KResultOr<FlatPtr> Process::sys$anon_create(size_t size, int options)
     if (new_fd_or_error.is_error())
         return new_fd_or_error.error();
     auto new_fd = new_fd_or_error.release_value();
-    auto vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
-    if (!vmobject)
-        return ENOMEM;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
-    auto anon_file = AnonymousFile::create(vmobject.release_nonnull());
+    auto anon_file = AnonymousFile::create(maybe_vmobject.release_value());
     if (!anon_file)
         return ENOMEM;
     auto description_or_error = FileDescription::create(*anon_file);


@@ -219,12 +219,17 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     if (map_anonymous) {
         auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
         RefPtr<Memory::AnonymousVMObject> vmobject;
-        if (flags & MAP_PURGEABLE)
-            vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy);
-        else
-            vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
-        if (!vmobject)
-            return ENOMEM;
+        if (flags & MAP_PURGEABLE) {
+            auto maybe_vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy);
+            if (maybe_vmobject.is_error())
+                return maybe_vmobject.error();
+            vmobject = maybe_vmobject.release_value();
+        } else {
+            auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
+            if (maybe_vmobject.is_error())
+                return maybe_vmobject.error();
+            vmobject = maybe_vmobject.release_value();
+        }
         auto region_or_error = address_space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
         if (region_or_error.is_error())
             return region_or_error.error().error();