
Kernel: Make kernel region allocators return KResultOr<NonnullOwnPtr<Region>>

This expands the reach of error propagation greatly throughout the
kernel. Sadly, it also exposes the fact that we're allocating (and
doing other fallible things) in constructors all over the place.

This patch doesn't attempt to address that of course. That's work for
our future selves.
Andreas Kling 2021-09-06 01:36:14 +02:00
parent cb71a73708
commit 75564b4a5f
40 changed files with 173 additions and 193 deletions
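Nearly every hunk below applies the same mechanical transformation. As a minimal sketch of the before/after pattern (the size and region name here are placeholders, not lines taken from the tree):

    // Before: failure was signalled by a null OwnPtr<Region>, which every
    // caller had to test by hand.
    auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example", Memory::Region::Access::ReadWrite);
    if (!region)
        return ENOMEM;

    // After: the allocators return KResultOr<NonnullOwnPtr<Region>>, so a
    // KResult-returning caller can propagate the error with TRY().
    auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "Example", Memory::Region::Access::ReadWrite));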


@@ -162,7 +162,7 @@ Memory::MappedROM map_bios()
     Memory::MappedROM mapping;
     mapping.size = 128 * KiB;
     mapping.paddr = PhysicalAddress(0xe0000);
-    mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size), {}, Memory::Region::Access::Read);
+    mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size), {}, Memory::Region::Access::Read).release_value();
     return mapping;
 }
@@ -176,7 +176,7 @@ Memory::MappedROM map_ebda()
     size_t ebda_size = (*ebda_length_ptr_b1 << 8) | *ebda_length_ptr_b0;
     Memory::MappedROM mapping;
-    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size), {}, Memory::Region::Access::Read);
+    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size), {}, Memory::Region::Access::Read).release_value();
     mapping.offset = ebda_paddr.offset_in_page();
     mapping.size = ebda_size;
     mapping.paddr = ebda_paddr;


@@ -57,7 +57,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
 {
     dmesgln("PCI: Using MMIO for PCI configuration space access");
-    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::ReadWrite);
+    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::ReadWrite).release_value();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG Table length to choose the correct mapping size");
     auto* sdt = (ACPI::Structures::SDTHeader*)checkup_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     u32 length = sdt->length;
@@ -66,7 +66,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
     checkup_region->unmap();
-    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite);
+    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite).release_value();
     auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));
@@ -89,7 +89,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     // PCI::PhysicalID objects to the vector, because get_capabilities calls
     // PCI::read16 which will need this region to be mapped.
     u8 start_bus = m_segments.get(0).value().get_start_bus();
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite).release_value();
     m_mapped_bus = start_bus;
     dbgln_if(PCI_DEBUG, "PCI: First PCI ECAM Mapped region for starting bus {} @ {} {}", start_bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
@@ -102,7 +102,7 @@ void MMIOAccess::map_bus_region(u32 segment, u8 bus)
     VERIFY(m_access_lock.is_locked());
     if (m_mapped_bus == bus)
         return;
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::ReadWrite).release_value();
     m_mapped_bus = bus;
     dbgln_if(PCI_DEBUG, "PCI: New PCI ECAM Mapped region for bus {} @ {} {}", bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
 }


@@ -18,7 +18,7 @@ namespace PCI {
 UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMapping(Address device_address, const MMIOAccess::MMIOSegment& mmio_segment)
     : m_device_address(device_address)
-    , m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::ReadWrite).release_nonnull())
+    , m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::ReadWrite).release_value())
 {
     PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
     PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(


@@ -108,7 +108,7 @@ KResult UHCIController::reset()
     // Let's allocate the physical page for the Frame List (which is 4KiB aligned)
     auto vmobject = TRY(Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE));
-    m_framelist = MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
+    m_framelist = TRY(MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write));
     dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
     dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
     write_sofmod(64); // 1mS frame time
@@ -144,11 +144,7 @@ UNMAP_AFTER_INIT KResult UHCIController::create_structures()
     m_transfer_descriptor_pool = TRY(UHCIDescriptorPool<TransferDescriptor>::try_create("Transfer Descriptor Pool"sv));
-    m_isochronous_transfer_pool = MM.allocate_kernel_region_with_vmobject(move(td_pool_vmobject), PAGE_SIZE, "UHCI Isochronous Descriptor Pool", Memory::Region::Access::ReadWrite);
-    if (!m_isochronous_transfer_pool) {
-        dmesgln("UHCI: Failed to allocated Isochronous Descriptor Pool!");
-        return ENOMEM;
-    }
+    m_isochronous_transfer_pool = TRY(MM.allocate_kernel_region_with_vmobject(move(td_pool_vmobject), PAGE_SIZE, "UHCI Isochronous Descriptor Pool", Memory::Region::Access::ReadWrite));
     // Set up the Isochronous Transfer Descriptor list
     m_iso_td_list.resize(UHCI_NUMBER_OF_ISOCHRONOUS_TDS);


@@ -30,11 +30,8 @@ class UHCIDescriptorPool {
 public:
     static KResultOr<NonnullOwnPtr<UHCIDescriptorPool<T>>> try_create(StringView name)
     {
-        auto pool_memory_block = MM.allocate_kernel_region(PAGE_SIZE, "UHCI Descriptor Pool", Memory::Region::Access::ReadWrite);
-        if (!pool_memory_block)
-            return ENOMEM;
-        return adopt_nonnull_own_or_enomem(new (nothrow) UHCIDescriptorPool(pool_memory_block.release_nonnull(), name));
+        auto pool_memory_block = TRY(MM.allocate_kernel_region(PAGE_SIZE, "UHCI Descriptor Pool", Memory::Region::Access::ReadWrite));
+        return adopt_nonnull_own_or_enomem(new (nothrow) UHCIDescriptorPool(move(pool_memory_block), name));
     }
     ~UHCIDescriptorPool() = default;


@@ -9,15 +9,12 @@
 namespace Kernel::USB {
-KResultOr<NonnullRefPtr<Transfer>> Transfer::try_create(Pipe& pipe, u16 len)
+KResultOr<NonnullRefPtr<Transfer>> Transfer::try_create(Pipe& pipe, u16 length)
 {
     // Initialize data buffer for transfer
     // This will definitely need to be refactored in the future, I doubt this will scale well...
-    auto data_buffer = MM.allocate_kernel_region(PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::ReadWrite);
-    if (!data_buffer)
-        return ENOMEM;
-    return adopt_nonnull_ref_or_enomem(new (nothrow) Transfer(pipe, len, data_buffer.release_nonnull()));
+    auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::ReadWrite));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) Transfer(pipe, length, move(region)));
 }
 Transfer::Transfer(Pipe& pipe, u16 len, NonnullOwnPtr<Memory::Region> data_buffer)


@@ -147,11 +147,14 @@ auto Device::mapping_for_bar(u8 bar) -> MappedMMIO&
 {
     VERIFY(m_use_mmio);
     auto& mapping = m_mmio[bar];
-    if (!mapping.base) {
-        mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
-        mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
-        if (!mapping.base)
-            dbgln("{}: Failed to map bar {}", VirtIO::determine_device_class(pci_address()), bar);
+    if (!mapping.base && mapping.size) {
+        auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+        if (region_or_error.is_error()) {
+            dbgln("{}: Failed to map bar {} - (size={}) {}", VirtIO::determine_device_class(pci_address()), bar, mapping.size, region_or_error.error());
+        } else {
+            mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
+            mapping.base = region_or_error.release_value();
+        }
     }
     return mapping;
 }
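Call sites that cannot propagate a KResult yet (constructors, void or bool-returning paths like the one above) unwrap the KResultOr by hand instead. A rough sketch of that fallback pattern, with illustrative identifiers:

    auto region_or_error = MM.allocate_kernel_region(PAGE_SIZE, "Example", Memory::Region::Access::ReadWrite);
    if (region_or_error.is_error())
        dbgln("Example: failed to allocate region: {}", region_or_error.error());
    else
        m_region = region_or_error.release_value();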


@@ -19,10 +19,9 @@ Queue::Queue(u16 queue_size, u16 notify_offset)
     size_t size_of_device = sizeof(QueueDevice) + queue_size * sizeof(QueueDeviceItem);
     auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device);
     if (queue_region_size <= PAGE_SIZE)
-        m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
+        m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
     else
-        m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
-    VERIFY(m_queue_region);
+        m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
     // TODO: ensure alignment!!!
     u8* ptr = m_queue_region->vaddr().as_ptr();
     memset(ptr, 0, m_queue_region->size());


@@ -25,7 +25,7 @@ UNMAP_AFTER_INIT void RNG::initialize()
     }
     if (success) {
         finish_init();
-        m_entropy_buffer = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIO::RNG", Memory::Region::Access::ReadWrite);
+        m_entropy_buffer = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIO::RNG", Memory::Region::Access::ReadWrite).release_value();
         if (m_entropy_buffer) {
             memset(m_entropy_buffer->vaddr().as_ptr(), 0, m_entropy_buffer->size());
             request_entropy_from_host();


@@ -33,11 +33,9 @@ KResult KCOVInstance::buffer_allocate(size_t buffer_size_in_entries)
         return maybe_vmobject.error();
     m_vmobject = maybe_vmobject.release_value();
-    m_kernel_region = MM.allocate_kernel_region_with_vmobject(
+    m_kernel_region = TRY(MM.allocate_kernel_region_with_vmobject(
         *m_vmobject, m_buffer_size_in_bytes, String::formatted("kcov_{}", m_pid),
-        Memory::Region::Access::ReadWrite);
-    if (!m_kernel_region)
-        return ENOMEM;
+        Memory::Region::Access::ReadWrite));
     m_buffer = (u64*)m_kernel_region->vaddr().as_ptr();
     if (!has_buffer())


@@ -263,9 +263,7 @@ KResultOr<size_t> SB16::write(FileDescription&, u64, UserOrKernelBuffer const& d
             return ENOMEM;
         auto nonnull_page = page.release_nonnull();
         auto vmobject = TRY(Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 }));
-        m_dma_region = MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
-        if (!m_dma_region)
-            return ENOMEM;
+        m_dma_region = TRY(MM.allocate_kernel_region_with_vmobject(move(vmobject), PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write));
     }
     dbgln_if(SB16_DEBUG, "SB16: Writing buffer of {} bytes", length);


@@ -28,8 +28,9 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s
     m_pitch = pitch;
     dbgln("Framebuffer Console: taking {} bytes", Memory::page_round_up(pitch * height));
-    m_framebuffer_region = MM.allocate_kernel_region(m_framebuffer_address, Memory::page_round_up(pitch * height), "Framebuffer Console", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
-    VERIFY(m_framebuffer_region);
+    auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, Memory::page_round_up(pitch * height), "Framebuffer Console", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
+    VERIFY(!region_or_error.is_error());
+    m_framebuffer_region = region_or_error.release_value();
     // Just to start cleanly, we clean the entire framebuffer
     memset(m_framebuffer_region->vaddr().as_ptr(), 0, pitch * height);


@@ -11,7 +11,7 @@ namespace Kernel::Graphics {
 UNMAP_AFTER_INIT VGAConsole::VGAConsole(const VGACompatibleAdapter& adapter, Mode mode, size_t width, size_t height)
     : Console(width, height)
-    , m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), Memory::page_round_up(0xc0000 - 0xa0000), "VGA Display", Memory::Region::Access::ReadWrite).release_nonnull())
+    , m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), Memory::page_round_up(0xc0000 - 0xa0000), "VGA Display", Memory::Region::Access::ReadWrite).release_value())
     , m_adapter(adapter)
     , m_mode(mode)
 {


@@ -51,13 +51,8 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
         return maybe_swapped_framebuffer_vmobject.error();
     m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
-    m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
-    if (!m_real_framebuffer_region)
-        return ENOMEM;
-    m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite);
-    if (!m_swapped_framebuffer_region)
-        return ENOMEM;
+    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite));
+    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
     RefPtr<Memory::VMObject> chosen_vmobject;
     if (m_graphical_writes_enabled) {
@@ -123,13 +118,8 @@ UNMAP_AFTER_INIT KResult FramebufferDevice::initialize()
         return maybe_swapped_framebuffer_vmobject.error();
     m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
-    m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
-    if (!m_real_framebuffer_region)
-        return ENOMEM;
-    m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite);
-    if (!m_swapped_framebuffer_region)
-        return ENOMEM;
+    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite));
+    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
     return KSuccess;
 }


@@ -189,7 +189,11 @@ IntelNativeGraphicsAdapter::IntelNativeGraphicsAdapter(PCI::Address address)
     VERIFY(bar0_space_size == 0x80000);
     dmesgln("Intel Native Graphics Adapter @ {}, MMIO @ {}, space size is {:x} bytes", address, PhysicalAddress(PCI::get_BAR0(address)), bar0_space_size);
     dmesgln("Intel Native Graphics Adapter @ {}, framebuffer @ {}", address, PhysicalAddress(PCI::get_BAR2(address)));
-    m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::ReadWrite);
+    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::ReadWrite);
+    if (region_or_error.is_error()) {
+        TODO();
+    }
+    m_registers_region = region_or_error.release_value();
     PCI::enable_bus_mastering(address);
     {
         SpinlockLocker control_lock(m_control_lock);


@@ -35,7 +35,7 @@ KResult FrameBufferDevice::create_framebuffer()
     // Allocate frame buffer for both front and back
     auto& info = display_info();
     m_buffer_size = calculate_framebuffer_size(info.rect.width, info.rect.height);
-    m_framebuffer = MM.allocate_kernel_region(m_buffer_size * 2, String::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+    m_framebuffer = TRY(MM.allocate_kernel_region(m_buffer_size * 2, String::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
     auto write_sink_page = MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No).release_nonnull();
     auto num_needed_pages = m_framebuffer->vmobject().page_count();
@@ -43,10 +43,7 @@ KResult FrameBufferDevice::create_framebuffer()
     for (auto i = 0u; i < num_needed_pages; ++i) {
         pages.append(write_sink_page);
     }
-    auto maybe_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
-    if (maybe_framebuffer_sink_vmobject.is_error())
-        return maybe_framebuffer_sink_vmobject.error();
-    m_framebuffer_sink_vmobject = maybe_framebuffer_sink_vmobject.release_value();
+    m_framebuffer_sink_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span()));
     MutexLocker locker(m_gpu.operation_lock());
     m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());


@@ -48,8 +48,11 @@ void GPU::initialize()
 GPU::GPU(PCI::Address address)
     : VirtIO::Device(address)
-    , m_scratch_space(MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Memory::Region::Access::ReadWrite))
 {
+    auto region_or_error = MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Memory::Region::Access::ReadWrite);
+    if (region_or_error.is_error())
+        TODO();
+    m_scratch_space = region_or_error.release_value();
 }
 GPU::~GPU()


@@ -99,16 +99,18 @@ struct KmallocGlobalHeap {
             // allocations not including the original allocation_request
             // that triggered heap expansion. If we don't allocate
             memory_size += 1 * MiB;
-            region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
-            if (region) {
-                dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());
-                m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
-                m_global_heap.m_subheap_memory.append(region.release_nonnull());
-            } else {
+            auto new_region_or_error = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+            if (new_region_or_error.is_error()) {
                 dbgln("kmalloc: Could not expand heap to satisfy allocation of {} bytes", allocation_request);
                 return false;
             }
+            region = new_region_or_error.release_value();
+            dbgln("kmalloc: Adding even more memory to heap at {}, bytes: {}", region->vaddr(), region->size());
+            m_global_heap.m_heap.add_subheap(region->vaddr().as_ptr(), region->size());
+            m_global_heap.m_subheap_memory.append(region.release_nonnull());
         }
         return true;
     }
@@ -173,7 +175,7 @@ struct KmallocGlobalHeap {
     {
         if (m_backup_memory)
            return;
-        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();

     size_t backup_memory_bytes() const


@@ -251,11 +251,12 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
     set_base(apic_base);
     if (!m_is_x2) {
-        m_apic_base = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
-        if (!m_apic_base) {
+        auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
+        if (region_or_error.is_error()) {
             dbgln("APIC: Failed to allocate memory for APIC base");
             return false;
         }
+        m_apic_base = region_or_error.release_value();
     }
     auto rsdp = ACPI::StaticParsing::find_rsdp();
@@ -311,13 +312,13 @@ UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_reg
     // FIXME: Would be nice to be able to return a KResultOr from here.
     VERIFY(!maybe_vmobject.is_error());
-    auto region = MM.allocate_kernel_region_with_vmobject(
+    auto region_or_error = MM.allocate_kernel_region_with_vmobject(
         Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
         maybe_vmobject.release_value(),
         {},
         Memory::Region::Access::ReadWriteExecute);
-    VERIFY(region);
-    return region.release_nonnull();
+    VERIFY(!region_or_error.is_error());
+    return region_or_error.release_value();
 }
 UNMAP_AFTER_INIT void APIC::do_boot_aps()
@@ -335,11 +336,12 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     // Allocate enough stacks for all APs
     Vector<OwnPtr<Memory::Region>> apic_ap_stacks;
     for (u32 i = 0; i < aps_to_enable; i++) {
-        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
-        if (!stack_region) {
+        auto stack_region_or_error = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+        if (stack_region_or_error.is_error()) {
             dbgln("APIC: Failed to allocate stack for AP #{}", i);
             return;
         }
+        auto stack_region = stack_region_or_error.release_value();
         stack_region->set_stack(true);
         apic_ap_stacks.append(move(stack_region));
     }


@@ -29,20 +29,21 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
     static RefPtr<KBufferImpl> try_create_with_size(size_t size, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy);
-        if (!region)
+        auto region_or_error = MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy);
+        if (region_or_error.is_error())
             return nullptr;
-        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), size, strategy));
+        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region_or_error.release_value(), size, strategy));
     }
     static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(Memory::page_round_up(bytes.size()), name, access, strategy);
-        if (!region)
+        auto region_or_error = MM.allocate_kernel_region(Memory::page_round_up(bytes.size()), name, access, strategy);
+        if (region_or_error.is_error())
             return nullptr;
+        auto region = region_or_error.release_value();
         memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
-        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), bytes.size(), strategy));
+        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(move(region), bytes.size(), strategy));
     }
     static RefPtr<KBufferImpl> create_with_size(size_t size, Memory::Region::Access access, StringView name, AllocationStrategy strategy = AllocationStrategy::Reserve)
@@ -61,12 +62,13 @@ public:
     [[nodiscard]] bool expand(size_t new_capacity)
     {
-        auto new_region = MM.allocate_kernel_region(Memory::page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
-        if (!new_region)
+        auto new_region_or_error = MM.allocate_kernel_region(Memory::page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
+        if (new_region_or_error.is_error())
             return false;
+        auto new_region = new_region_or_error.release_value();
         if (m_size > 0)
             memcpy(new_region->vaddr().as_ptr(), data(), min(m_region->size(), m_size));
-        m_region = new_region.release_nonnull();
+        m_region = move(new_region);
         return true;
     }


@@ -700,71 +700,46 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
     return region->handle_fault(fault);
 }
-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
-    if (range_or_error.is_error())
-        return {};
-    auto range = range_or_error.release_value();
-    auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
-    if (maybe_vmobject.is_error()) {
-        kernel_page_directory().range_allocator().deallocate(range);
-        // FIXME: Would be nice to be able to return a KResultOr from here.
-        return {};
-    }
-    return allocate_kernel_region_with_vmobject(range, maybe_vmobject.release_value(), name, access, cacheable);
+    auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
+    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
+KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
-    if (maybe_vm_object.is_error())
-        return {};
+    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
-    if (range_or_error.is_error())
-        return {};
-    auto range = range_or_error.release_value();
-    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
-OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    auto maybe_vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (maybe_vm_object.is_error())
-        return {};
     VERIFY(!(size % PAGE_SIZE));
+    auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
-    if (range_or_error.is_error())
-        return {};
-    auto range = range_or_error.release_value();
-    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    auto maybe_region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
-    if (maybe_region.is_error())
-        return {};
-    auto region = maybe_region.release_value();
+    auto region = TRY(Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable));
     if (!region->map(kernel_page_directory()))
-        return {};
+        return ENOMEM;
     return region;
 }
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+KResultOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
-    if (range_or_error.is_error())
-        return {};
-    auto range = range_or_error.release_value();
+    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }
@@ -909,8 +884,13 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
         return {};
     }
-    auto cleanup_region = MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
-    fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
+    {
+        auto region_or_error = MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
+        if (region_or_error.is_error())
+            TODO();
+        auto cleanup_region = region_or_error.release_value();
+        fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
+    }
     m_system_memory_info.super_physical_pages_used += count;
     return physical_pages;
 }
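For reference, the TRY() used throughout these allocators is AK's error-propagation macro. Roughly, and simplified from AK/Try.h (a GNU statement expression, not a verbatim copy):

    // Evaluates a KResultOr-returning expression; on failure, returns the
    // error from the enclosing function, otherwise yields the unwrapped value.
    #define TRY(expression)                                   \
        ({                                                    \
            auto _temporary_result = (expression);            \
            if (_temporary_result.is_error())                 \
                return _temporary_result.release_error();     \
            _temporary_result.release_value();                \
        })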


@@ -180,11 +180,11 @@ public:
     NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
-    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    KResultOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
+    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    KResultOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };


@@ -11,7 +11,7 @@
 namespace Kernel::Memory {
 RingBuffer::RingBuffer(String region_name, size_t capacity)
-    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write))
+    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write).release_value())
     , m_capacity_in_bytes(capacity)
 {
 }


@@ -21,7 +21,10 @@ RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest&
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
     : m_vm_object(move(vm_object))
 {
-    m_dma_region = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
+    auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
+    if (region_or_error.is_error())
+        TODO();
+    m_dma_region = region_or_error.release_value();
 }
 }


@@ -28,7 +28,10 @@ static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, Region::A
 {
     TypedMapping<T> table;
     size_t mapping_length = page_round_up(paddr.offset_in_page() + length);
-    table.region = MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access);
+    auto region_or_error = MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access);
+    if (region_or_error.is_error())
+        TODO();
+    table.region = region_or_error.release_value();
     table.offset = paddr.offset_in_page();
     return table;
 }


@@ -204,9 +204,10 @@ UNMAP_AFTER_INIT bool E1000ENetworkAdapter::initialize()
     enable_bus_mastering(pci_address());
     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000e MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
-    if (!m_mmio_region)
+    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000e MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    if (region_or_error.is_error())
         return false;
+    m_mmio_region = region_or_error.release_value();
     m_mmio_base = m_mmio_region->vaddr();
     m_use_mmio = true;
     m_interrupt_line = PCI::get_interrupt_line(pci_address());


@@ -195,9 +195,10 @@ UNMAP_AFTER_INIT bool E1000NetworkAdapter::initialize()
     m_io_base = IOAddress(PCI::get_BAR1(pci_address()) & ~1);
     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000 MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
-    if (!m_mmio_region)
+    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000 MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    if (region_or_error.is_error())
         return false;
+    m_mmio_region = region_or_error.release_value();
     m_mmio_base = m_mmio_region->vaddr();
     m_use_mmio = true;
     m_interrupt_line = PCI::get_interrupt_line(pci_address());
@@ -222,8 +223,8 @@ UNMAP_AFTER_INIT bool E1000NetworkAdapter::initialize()
 UNMAP_AFTER_INIT E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address, u8 irq)
     : PCI::Device(address)
     , IRQHandler(irq)
-    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16), "E1000 RX Descriptors", Memory::Region::Access::ReadWrite))
-    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16), "E1000 TX Descriptors", Memory::Region::Access::ReadWrite))
+    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16), "E1000 RX Descriptors", Memory::Region::Access::ReadWrite).release_value())
+    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16), "E1000 TX Descriptors", Memory::Region::Access::ReadWrite).release_value())
 {
     set_interface_name(pci_address());
 }
@@ -321,7 +322,7 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::initialize_rx_descriptors()
     constexpr auto rx_buffer_size = 8192;
     constexpr auto rx_buffer_page_count = rx_buffer_size / PAGE_SIZE;
-    m_rx_buffer_region = MM.allocate_contiguous_kernel_region(rx_buffer_size * number_of_rx_descriptors, "E1000 RX buffers", Memory::Region::Access::ReadWrite);
+    m_rx_buffer_region = MM.allocate_contiguous_kernel_region(rx_buffer_size * number_of_rx_descriptors, "E1000 RX buffers", Memory::Region::Access::ReadWrite).release_value();
     for (size_t i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = rx_descriptors[i];
         m_rx_buffers[i] = m_rx_buffer_region->vaddr().as_ptr() + rx_buffer_size * i;
@@ -344,7 +345,7 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::initialize_tx_descriptors()
     constexpr auto tx_buffer_size = 8192;
     constexpr auto tx_buffer_page_count = tx_buffer_size / PAGE_SIZE;
-    m_tx_buffer_region = MM.allocate_contiguous_kernel_region(tx_buffer_size * number_of_tx_descriptors, "E1000 TX buffers", Memory::Region::Access::ReadWrite);
+    m_tx_buffer_region = MM.allocate_contiguous_kernel_region(tx_buffer_size * number_of_tx_descriptors, "E1000 TX buffers", Memory::Region::Access::ReadWrite).release_value();
     for (size_t i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = tx_descriptors[i];


@@ -88,7 +88,10 @@ void NetworkTask_main(void*)
     };
     size_t buffer_size = 64 * KiB;
-    auto buffer_region = MM.allocate_kernel_region(buffer_size, "Kernel Packet Buffer", Memory::Region::Access::ReadWrite);
+    auto region_or_error = MM.allocate_kernel_region(buffer_size, "Kernel Packet Buffer", Memory::Region::Access::ReadWrite);
+    if (region_or_error.is_error())
+        TODO();
+    auto buffer_region = region_or_error.release_value();
     auto buffer = (u8*)buffer_region->vaddr().get();
     Time packet_timestamp;


@@ -125,8 +125,8 @@ UNMAP_AFTER_INIT RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address addre
     : PCI::Device(address)
     , IRQHandler(irq)
     , m_io_base(PCI::get_BAR0(pci_address()) & ~1)
-    , m_rx_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE + PACKET_SIZE_MAX), "RTL8139 RX", Memory::Region::Access::ReadWrite))
-    , m_packet_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(PACKET_SIZE_MAX), "RTL8139 Packet buffer", Memory::Region::Access::ReadWrite))
+    , m_rx_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE + PACKET_SIZE_MAX), "RTL8139 RX", Memory::Region::Access::ReadWrite).release_value())
+    , m_packet_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(PACKET_SIZE_MAX), "RTL8139 Packet buffer", Memory::Region::Access::ReadWrite).release_value())
 {
     m_tx_buffers.ensure_capacity(RTL8139_TX_BUFFER_COUNT);
     set_interface_name(address);
@@ -145,7 +145,7 @@ UNMAP_AFTER_INIT RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address addre
     dbgln("RTL8139: RX buffer: {}", m_rx_buffer->physical_page(0)->paddr());
     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
-        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8139 TX", Memory::Region::Access::Write | Memory::Region::Access::Read));
+        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8139 TX", Memory::Region::Access::Write | Memory::Region::Access::Read).release_value());
         dbgln("RTL8139: TX buffer {}: {}", i, m_tx_buffers[i]->physical_page(0)->paddr());
     }


@@ -195,8 +195,8 @@ UNMAP_AFTER_INIT RTL8168NetworkAdapter::RTL8168NetworkAdapter(PCI::Address addre
     : PCI::Device(address)
     , IRQHandler(irq)
     , m_io_base(PCI::get_BAR0(pci_address()) & ~1)
-    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(TXDescriptor) * (number_of_rx_descriptors + 1)), "RTL8168 RX", Memory::Region::Access::ReadWrite))
-    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(RXDescriptor) * (number_of_tx_descriptors + 1)), "RTL8168 TX", Memory::Region::Access::ReadWrite))
+    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(TXDescriptor) * (number_of_rx_descriptors + 1)), "RTL8168 RX", Memory::Region::Access::ReadWrite).release_value())
+    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(RXDescriptor) * (number_of_tx_descriptors + 1)), "RTL8168 TX", Memory::Region::Access::ReadWrite).release_value())
 {
     set_interface_name(address);
@@ -1043,10 +1043,9 @@ UNMAP_AFTER_INIT void RTL8168NetworkAdapter::initialize_rx_descriptors()
     auto* rx_descriptors = (RXDescriptor*)m_rx_descriptors_region->vaddr().as_ptr();
     for (size_t i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = rx_descriptors[i];
-        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE), "RTL8168 RX buffer", Memory::Region::Access::ReadWrite);
-        VERIFY(region);
+        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE), "RTL8168 RX buffer", Memory::Region::Access::ReadWrite).release_value();
         memset(region->vaddr().as_ptr(), 0, region->size()); // MM already zeros out newly allocated pages, but we do it again in case that ever changes
-        m_rx_buffers_regions.append(region.release_nonnull());
+        m_rx_buffers_regions.append(move(region));
         descriptor.buffer_size = RX_BUFFER_SIZE;
         descriptor.flags = RXDescriptor::Ownership; // let the NIC know it can use this descriptor
@@ -1062,10 +1061,9 @@ UNMAP_AFTER_INIT void RTL8168NetworkAdapter::initialize_tx_descriptors()
     auto* tx_descriptors = (TXDescriptor*)m_tx_descriptors_region->vaddr().as_ptr();
     for (size_t i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = tx_descriptors[i];
-        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8168 TX buffer", Memory::Region::Access::ReadWrite);
-        VERIFY(region);
+        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8168 TX buffer", Memory::Region::Access::ReadWrite).release_value();
         memset(region->vaddr().as_ptr(), 0, region->size()); // MM already zeros out newly allocated pages, but we do it again in case that ever changes
-        m_tx_buffers_regions.append(region.release_nonnull());
+        m_tx_buffers_regions.append(move(region));
         descriptor.flags = TXDescriptor::FirstSegment | TXDescriptor::LastSegment;
         auto physical_address = m_tx_buffers_regions[i].physical_page(0)->paddr().get();


@@ -363,7 +363,7 @@ extern "C" char const asm_signal_trampoline_end[];
 void create_signal_trampoline()
 {
     // NOTE: We leak this region.
-    g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Memory::Region::Access::ReadWrite).leak_ptr();
+    g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Memory::Region::Access::ReadWrite).release_value().leak_ptr();
     g_signal_trampoline_region->set_syscall_region(true);
     size_t trampoline_size = asm_signal_trampoline_end - asm_signal_trampoline;


@@ -126,8 +126,7 @@ AHCI::HBADefinedCapabilities AHCIController::capabilities() const
 NonnullOwnPtr<Memory::Region> AHCIController::default_hba_region() const
 {
-    auto region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR5(pci_address())).page_base(), Memory::page_round_up(sizeof(AHCI::HBA)), "AHCI HBA", Memory::Region::Access::ReadWrite);
-    return region.release_nonnull();
+    return MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR5(pci_address())).page_base(), Memory::page_round_up(sizeof(AHCI::HBA)), "AHCI HBA", Memory::Region::Access::ReadWrite).release_value();
 }
 AHCIController::~AHCIController()


@@ -50,7 +50,11 @@ AHCIPort::AHCIPort(const AHCIPortHandler& handler, volatile AHCI::PortRegisters&
     for (size_t index = 0; index < 1; index++) {
         m_command_table_pages.append(MM.allocate_supervisor_physical_page().release_nonnull());
     }
-    m_command_list_region = MM.allocate_kernel_region(m_command_list_page->paddr(), PAGE_SIZE, "AHCI Port Command List", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    auto region_or_error = MM.allocate_kernel_region(m_command_list_page->paddr(), PAGE_SIZE, "AHCI Port Command List", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    if (region_or_error.is_error())
+        TODO();
+    m_command_list_region = region_or_error.release_value();
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Command list region at {}", representative_port_index(), m_command_list_region->vaddr());
 }
@@ -159,7 +163,7 @@ void AHCIPort::eject()
     // handshake error bit in PxSERR register if CFL is incorrect.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P | AHCI::CommandHeaderAttributes::C | AHCI::CommandHeaderAttributes::A;
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;
@@ -526,7 +530,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
@@ -610,7 +614,7 @@ bool AHCIPort::identify_device(SpinlockLocker<Spinlock>& main_lock)
     // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite);
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     command_table.descriptors[0].base_high = 0;

View file

@@ -43,8 +43,19 @@ UNMAP_AFTER_INIT void BMIDEChannel::initialize()
     m_dma_buffer_page = MM.allocate_supervisor_physical_page();
     if (m_dma_buffer_page.is_null() || m_prdt_page.is_null())
         return;
-    m_prdt_region = MM.allocate_kernel_region(m_prdt_page->paddr(), PAGE_SIZE, "IDE PRDT", Memory::Region::Access::ReadWrite);
-    m_dma_buffer_region = MM.allocate_kernel_region(m_dma_buffer_page->paddr(), PAGE_SIZE, "IDE DMA region", Memory::Region::Access::ReadWrite);
+    {
+        auto region_or_error = MM.allocate_kernel_region(m_prdt_page->paddr(), PAGE_SIZE, "IDE PRDT", Memory::Region::Access::ReadWrite);
+        if (region_or_error.is_error())
+            TODO();
+        m_prdt_region = region_or_error.release_value();
+    }
+    {
+        auto region_or_error = MM.allocate_kernel_region(m_dma_buffer_page->paddr(), PAGE_SIZE, "IDE DMA region", Memory::Region::Access::ReadWrite);
+        if (region_or_error.is_error())
+            TODO();
+        m_dma_buffer_region = region_or_error.release_value();
+    }
     prdt().end_of_table = 0x8000;
     // clear bus master interrupt status
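AHCIPort and BMIDEChannel above both allocate inside constructors or early-init paths that have no way to return a KResult yet, hence the interim TODO() stubs. The shape of that stopgap, sketched with a hypothetical ExampleDevice (not a real kernel class):

    // Sketch: fallible region allocation inside a constructor that cannot
    // report failure to its caller. TODO() crashes with a "not implemented"
    // message, marking where real error propagation still needs plumbing.
    // "ExampleDevice" and its members are illustrative stand-ins.
    ExampleDevice::ExampleDevice()
    {
        auto region_or_error = MM.allocate_kernel_region(m_page->paddr(), PAGE_SIZE,
            "Example region", Memory::Region::Access::ReadWrite);
        if (region_or_error.is_error())
            TODO(); // constructor can't return the error yet
        m_region = region_or_error.release_value();
    }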

View file

@@ -49,11 +49,12 @@ RamdiskController::RamdiskController()
     for (auto& used_memory_range : MM.used_memory_ranges()) {
         if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
             size_t length = Memory::page_round_up(used_memory_range.end.get()) - used_memory_range.start.get();
-            auto region = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Memory::Region::Access::ReadWrite);
-            if (!region)
+            auto region_or_error = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Memory::Region::Access::ReadWrite);
+            if (region_or_error.is_error()) {
                 dmesgln("RamdiskController: Failed to allocate kernel region of size {}", length);
-            else
-                m_devices.append(RamdiskDevice::create(*this, region.release_nonnull(), 6, count));
+            } else {
+                m_devices.append(RamdiskDevice::create(*this, region_or_error.release_value(), 6, count));
+            }
             count++;
         }
     }
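RamdiskController is the one call site here that degrades gracefully: a failed allocation is logged and the boot module skipped, with no assertion. Roughly, with hypothetical names standing in for the real controller:

    // Sketch: log and skip on allocation failure instead of panicking.
    // "RamdiskLikeController", "add_ramdisk", and "ExampleDevice" are
    // illustrative stand-ins, not kernel classes.
    void RamdiskLikeController::add_ramdisk(PhysicalAddress start, size_t length)
    {
        auto region_or_error = MM.allocate_kernel_region(start, length, "Ramdisk",
            Memory::Region::Access::ReadWrite);
        if (region_or_error.is_error()) {
            dmesgln("Failed to allocate kernel region of size {}", length);
            return; // skip this module, keep booting
        }
        m_devices.append(ExampleDevice::create(region_or_error.release_value()));
    }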

View file

@@ -173,12 +173,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
     size_t executable_size = inode.size();
-    auto region = MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF memory range calculation", Memory::Region::Access::Read);
-    if (!region) {
-        dbgln("Could not allocate memory for ELF");
-        return ENOMEM;
-    }
+    auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF memory range calculation", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(region->vaddr().as_ptr(), executable_size);
     if (!elf_image.is_valid()) {
         return EINVAL;
@@ -283,12 +278,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
     size_t executable_size = inode.size();
-    auto executable_region = MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF loading", Memory::Region::Access::Read);
-    if (!executable_region) {
-        dbgln("Could not allocate memory for ELF loading");
-        return ENOMEM;
-    }
+    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF loading", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(executable_region->vaddr().as_ptr(), executable_size);
     if (!elf_image.is_valid())
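Both ELF paths can simply TRY() the allocation because they already return KResultOr, so the manual null-check-and-return-ENOMEM blocks disappear. For readers unfamiliar with the macro, it behaves roughly like the following statement expression (a sketch of the idea, not the verbatim AK implementation):

    // Rough sketch of what TRY(expression) expands to: on error, return the
    // error from the enclosing function; on success, yield the unwrapped value.
    #define TRY(expression)                    \
        ({                                     \
            auto result = (expression);        \
            if (result.is_error())             \
                return result.release_error(); \
            result.release_value();            \
        })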

View file

@@ -120,7 +120,7 @@ UNMAP_AFTER_INIT void VirtualConsole::initialize()
     // Allocate twice of the max row * max column * sizeof(Cell) to ensure we can have some sort of history mechanism...
     auto size = GraphicsManagement::the().console()->max_column() * GraphicsManagement::the().console()->max_row() * sizeof(Cell) * 2;
-    m_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+    m_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
     // Add the lines, so we also ensure they will be flushed now
     for (size_t row = 0; row < rows(); row++) {
@@ -139,7 +139,7 @@ void VirtualConsole::refresh_after_resolution_change()
     // Note: From now on, columns() and rows() are updated with the new settings.
     auto size = GraphicsManagement::the().console()->max_column() * GraphicsManagement::the().console()->max_row() * sizeof(Cell) * 2;
-    auto new_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
+    auto new_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
     if (rows() < old_rows_count) {
         m_lines.shrink(rows());

View file

@@ -39,9 +39,7 @@ SpinlockProtected<Thread::GlobalList>& Thread::all_instances()
 KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
 {
-    auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
-    if (!kernel_stack_region)
-        return ENOMEM;
+    auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
     kernel_stack_region->set_stack(true);
     auto block_timer = try_make_ref_counted<Timer>();
@@ -50,7 +48,7 @@ KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> proce
     auto name = KString::try_create(process->name());
-    return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), kernel_stack_region.release_nonnull(), block_timer.release_nonnull(), move(name)));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), block_timer.release_nonnull(), move(name)));
 }
 Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer, OwnPtr<KString> name)
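Thread shows the end-state pattern: the fallible allocation is hoisted into the static try_create() factory, so the constructor only ever receives already-valid resources. In isolation, with a hypothetical Widget class (the MM and AK calls are the same ones used in the hunk above):

    // Sketch of the two-phase construction idiom: allocate fallibly in a
    // static factory, then hand the proven-good region to a constructor
    // that has nothing left to fail at.
    class Widget {
    public:
        static KResultOr<NonnullRefPtr<Widget>> try_create()
        {
            auto region = TRY(MM.allocate_kernel_region(PAGE_SIZE, "Widget region",
                Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
            return adopt_nonnull_ref_or_enomem(new (nothrow) Widget(move(region)));
        }

    private:
        explicit Widget(NonnullOwnPtr<Memory::Region> region)
            : m_region(move(region))
        {
        }
        NonnullOwnPtr<Memory::Region> m_region;
    };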

View file

@@ -413,7 +413,7 @@ u64 HPET::ns_to_raw_counter_ticks(u64 ns) const
 UNMAP_AFTER_INIT HPET::HPET(PhysicalAddress acpi_hpet)
     : m_physical_acpi_hpet_table(acpi_hpet)
     , m_physical_acpi_hpet_registers(find_acpi_hpet_registers_block())
-    , m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO", Memory::Region::Access::ReadWrite))
+    , m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO", Memory::Region::Access::ReadWrite).release_value())
 {
     s_hpet = this; // Make available as soon as possible so that IRQs can use it

View file

@@ -146,8 +146,7 @@ UNMAP_AFTER_INIT void TimeManagement::initialize(u32 cpu)
             s_the->set_system_timer(*apic_timer);
         }
-        s_the->m_time_page_region = MM.allocate_kernel_region(PAGE_SIZE, "Time page"sv, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
-        VERIFY(s_the->m_time_page_region);
+        s_the->m_time_page_region = MM.allocate_kernel_region(PAGE_SIZE, "Time page"sv, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
     } else {
         VERIFY(s_the.is_initialized());
         if (auto* apic_timer = APIC::the().get_timer()) {