
Kernel: Make kernel region allocators return KResultOr<NonnullOwnPtr<Region>>

This expands the reach of error propagation greatly throughout the
kernel. Sadly, it also exposes the fact that we're allocating (and
doing other fallible things) in constructors all over the place.
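
As a concrete illustration (not part of the patch itself; the region name is hypothetical), a call site that previously had to null-check the returned OwnPtr<Region> can now unwrap-or-propagate in one step with TRY():

    // Before: allocators returned OwnPtr<Region>, so every caller
    // needed an explicit null check and a hand-rolled ENOMEM return.
    auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example Region", Memory::Region::Access::ReadWrite);
    if (!region)
        return ENOMEM;

    // After: allocators return KResultOr<NonnullOwnPtr<Region>>, so TRY()
    // either yields the NonnullOwnPtr or returns the error to our caller.
    auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "Example Region", Memory::Region::Access::ReadWrite));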

This patch doesn't attempt to address that of course. That's work for
our future selves.
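
The constructor problem is visible in the hunks below: the pattern the codebase already leans on is a static try_create() factory that does all fallible work up front and only then invokes an infallible constructor. A minimal sketch of that shape, with a hypothetical Example class:

    class Example {
    public:
        // Fallible work happens here, where errors can be propagated...
        static KResultOr<NonnullOwnPtr<Example>> try_create()
        {
            auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "Example Region", Memory::Region::Access::ReadWrite));
            return adopt_nonnull_own_or_enomem(new (nothrow) Example(move(region)));
        }

    private:
        // ...so the constructor itself only moves in ready-made resources.
        explicit Example(NonnullOwnPtr<Memory::Region> region)
            : m_region(move(region))
        {
        }

        NonnullOwnPtr<Memory::Region> m_region;
    };
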
Andreas Kling 2021-09-06 01:36:14 +02:00
parent cb71a73708
commit 75564b4a5f
40 changed files with 173 additions and 193 deletions


@@ -57,7 +57,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
 {
     dmesgln("PCI: Using MMIO for PCI configuration space access");
-    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::ReadWrite);
+    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::ReadWrite).release_value();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG Table length to choose the correct mapping size");
     auto* sdt = (ACPI::Structures::SDTHeader*)checkup_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     u32 length = sdt->length;
@@ -66,7 +66,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
     checkup_region->unmap();
-    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite);
+    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite).release_value();
     auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));
@@ -89,7 +89,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     // PCI::PhysicalID objects to the vector, because get_capabilities calls
     // PCI::read16 which will need this region to be mapped.
     u8 start_bus = m_segments.get(0).value().get_start_bus();
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite).release_value();
     m_mapped_bus = start_bus;
     dbgln_if(PCI_DEBUG, "PCI: First PCI ECAM Mapped region for starting bus {} @ {} {}", start_bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
@@ -102,7 +102,7 @@ void MMIOAccess::map_bus_region(u32 segment, u8 bus)
     VERIFY(m_access_lock.is_locked());
     if (m_mapped_bus == bus)
         return;
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite).release_value();
     m_mapped_bus = bus;
     dbgln_if(PCI_DEBUG, "PCI: New PCI ECAM Mapped region for bus {} @ {} {}", bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
 }


@@ -18,7 +18,7 @@ namespace PCI {
 UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMapping(Address device_address, const MMIOAccess::MMIOSegment& mmio_segment)
     : m_device_address(device_address)
-    , m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::ReadWrite).release_nonnull())
+    , m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::ReadWrite).release_value())
 {
     PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
     PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(


@@ -108,7 +108,7 @@ KResult UHCIController::reset()
     // Let's allocate the physical page for the Frame List (which is 4KiB aligned)
     auto vmobject = TRY(Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE));
-    m_framelist = MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
+    m_framelist = TRY(MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write));
     dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
     dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
     write_sofmod(64); // 1mS frame time
@@ -144,11 +144,7 @@ UNMAP_AFTER_INIT KResult UHCIController::create_structures()
     m_transfer_descriptor_pool = TRY(UHCIDescriptorPool<TransferDescriptor>::try_create("Transfer Descriptor Pool"sv));
-    m_isochronous_transfer_pool = MM.allocate_kernel_region_with_vmobject(move(td_pool_vmobject), PAGE_SIZE, "UHCI Isochronous Descriptor Pool", Memory::Region::Access::ReadWrite);
-    if (!m_isochronous_transfer_pool) {
-        dmesgln("UHCI: Failed to allocated Isochronous Descriptor Pool!");
-        return ENOMEM;
-    }
+    m_isochronous_transfer_pool = TRY(MM.allocate_kernel_region_with_vmobject(move(td_pool_vmobject), PAGE_SIZE, "UHCI Isochronous Descriptor Pool", Memory::Region::Access::ReadWrite));
     // Set up the Isochronous Transfer Descriptor list
     m_iso_td_list.resize(UHCI_NUMBER_OF_ISOCHRONOUS_TDS);


@@ -30,11 +30,8 @@ class UHCIDescriptorPool {
 public:
     static KResultOr<NonnullOwnPtr<UHCIDescriptorPool<T>>> try_create(StringView name)
     {
-        auto pool_memory_block = MM.allocate_kernel_region(PAGE_SIZE, "UHCI Descriptor Pool", Memory::Region::Access::ReadWrite);
-        if (!pool_memory_block)
-            return ENOMEM;
-        return adopt_nonnull_own_or_enomem(new (nothrow) UHCIDescriptorPool(pool_memory_block.release_nonnull(), name));
+        auto pool_memory_block = TRY(MM.allocate_kernel_region(PAGE_SIZE, "UHCI Descriptor Pool", Memory::Region::Access::ReadWrite));
+        return adopt_nonnull_own_or_enomem(new (nothrow) UHCIDescriptorPool(move(pool_memory_block), name));
     }
     ~UHCIDescriptorPool() = default;


@@ -9,15 +9,12 @@
 namespace Kernel::USB {
-KResultOr<NonnullRefPtr<Transfer>> Transfer::try_create(Pipe& pipe, u16 len)
+KResultOr<NonnullRefPtr<Transfer>> Transfer::try_create(Pipe& pipe, u16 length)
 {
     // Initialize data buffer for transfer
     // This will definitely need to be refactored in the future, I doubt this will scale well...
-    auto data_buffer = MM.allocate_kernel_region(PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::ReadWrite);
-    if (!data_buffer)
-        return ENOMEM;
-    return adopt_nonnull_ref_or_enomem(new (nothrow) Transfer(pipe, len, data_buffer.release_nonnull()));
+    auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::ReadWrite));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) Transfer(pipe, length, move(region)));
 }
 Transfer::Transfer(Pipe& pipe, u16 len, NonnullOwnPtr<Memory::Region> data_buffer)


@@ -147,11 +147,14 @@ auto Device::mapping_for_bar(u8 bar) -> MappedMMIO&
 {
     VERIFY(m_use_mmio);
     auto& mapping = m_mmio[bar];
-    if (!mapping.base) {
-        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
-        mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
-        if (!mapping.base)
-            dbgln("{}: Failed to map bar {}", VirtIO::determine_device_class(pci_address()), bar);
+    if (!mapping.base && mapping.size) {
+        auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+        if (region_or_error.is_error()) {
+            dbgln("{}: Failed to map bar {} - (size={}) {}", VirtIO::determine_device_class(pci_address()), bar, mapping.size, region_or_error.error());
+        } else {
+            mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
+            mapping.base = region_or_error.release_value();
+        }
     }
     return mapping;
 }


@@ -19,10 +19,9 @@ Queue::Queue(u16 queue_size, u16 notify_offset)
     size_t size_of_device = sizeof(QueueDevice) + queue_size * sizeof(QueueDeviceItem);
     auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device);
     if (queue_region_size <= PAGE_SIZE)
-        m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
+        m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
     else
-        m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
-    VERIFY(m_queue_region);
+        m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
     // TODO: ensure alignment!!!
     u8* ptr = m_queue_region->vaddr().as_ptr();
     memset(ptr, 0, m_queue_region->size());


@@ -25,7 +25,7 @@ UNMAP_AFTER_INIT void RNG::initialize()
     }
     if (success) {
         finish_init();
-        m_entropy_buffer = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIO::RNG", Memory::Region::Access::ReadWrite);
+        m_entropy_buffer = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIO::RNG", Memory::Region::Access::ReadWrite).release_value();
         if (m_entropy_buffer) {
             memset(m_entropy_buffer->vaddr().as_ptr(), 0, m_entropy_buffer->size());
             request_entropy_from_host();