diff --git a/Kernel/Devices/SB16.cpp b/Kernel/Devices/SB16.cpp
index 935a0b5271..e65d87605f 100644
--- a/Kernel/Devices/SB16.cpp
+++ b/Kernel/Devices/SB16.cpp
@@ -179,7 +179,7 @@ ssize_t SB16::read(FileDescription&, size_t, u8*, ssize_t)
 void SB16::dma_start(uint32_t length)
 {
-    const auto addr = m_dma_region->vmobject().physical_pages()[0]->paddr().get();
+    const auto addr = m_dma_region->physical_page(0)->paddr().get();
     const u8 channel = 5; // 16-bit samples use DMA channel 5 (on the master DMA controller)
     const u8 mode = 0;
diff --git a/Kernel/FileSystem/ProcFS.cpp b/Kernel/FileSystem/ProcFS.cpp
index b912728f9f..927383dd45 100644
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -319,11 +319,10 @@ Optional<KBuffer> procfs$pid_vm(InodeIdentifier identifier)
         StringBuilder pagemap_builder;
         for (size_t i = 0; i < region.page_count(); ++i) {
-            auto page_index = region.first_page_index() + i;
-            auto& physical_page_slot = region.vmobject().physical_pages()[page_index];
-            if (!physical_page_slot)
+            auto* page = region.physical_page(i);
+            if (!page)
                 pagemap_builder.append('N');
-            else if (physical_page_slot == MM.shared_zero_page())
+            else if (page->is_shared_zero_page())
                 pagemap_builder.append('Z');
             else
                 pagemap_builder.append('P');
diff --git a/Kernel/Net/E1000NetworkAdapter.cpp b/Kernel/Net/E1000NetworkAdapter.cpp
index e9bf289974..9c422246ee 100644
--- a/Kernel/Net/E1000NetworkAdapter.cpp
+++ b/Kernel/Net/E1000NetworkAdapter.cpp
@@ -279,12 +279,14 @@ void E1000NetworkAdapter::initialize_rx_descriptors()
     auto* rx_descriptors = (e1000_tx_desc*)m_rx_descriptors_region->vaddr().as_ptr();
     for (int i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = rx_descriptors[i];
-        m_rx_buffers_regions.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(8192), "E1000 RX buffer", Region::Access::Read | Region::Access::Write));
-        descriptor.addr = m_rx_buffers_regions[i]->vmobject().physical_pages()[0]->paddr().get();
+        auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 RX buffer", Region::Access::Read | Region::Access::Write);
+        ASSERT(region);
+        m_rx_buffers_regions.append(region.release_nonnull());
+        descriptor.addr = m_rx_buffers_regions[i].physical_page(0)->paddr().get();
         descriptor.status = 0;
     }
-    out32(REG_RXDESCLO, m_rx_descriptors_region->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_RXDESCLO, m_rx_descriptors_region->physical_page(0)->paddr().get());
     out32(REG_RXDESCHI, 0);
     out32(REG_RXDESCLEN, number_of_rx_descriptors * sizeof(e1000_rx_desc));
     out32(REG_RXDESCHEAD, 0);
@@ -298,12 +300,14 @@ void E1000NetworkAdapter::initialize_tx_descriptors()
     auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
     for (int i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = tx_descriptors[i];
-        m_tx_buffers_regions.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(8192), "E1000 TX buffer", Region::Access::Read | Region::Access::Write));
-        descriptor.addr = m_tx_buffers_regions[i]->vmobject().physical_pages()[0]->paddr().get();
+        auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 TX buffer", Region::Access::Read | Region::Access::Write);
+        ASSERT(region);
+        m_tx_buffers_regions.append(region.release_nonnull());
+        descriptor.addr = m_tx_buffers_regions[i].physical_page(0)->paddr().get();
         descriptor.cmd = 0;
     }
-    out32(REG_TXDESCLO, m_tx_descriptors_region->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_TXDESCLO, m_tx_descriptors_region->physical_page(0)->paddr().get());
     out32(REG_TXDESCHI, 0);
     out32(REG_TXDESCLEN, number_of_tx_descriptors * sizeof(e1000_tx_desc));
     out32(REG_TXDESCHEAD, 0);
@@ -392,7 +396,7 @@ void E1000NetworkAdapter::send_raw(const u8* data, size_t length)
     auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
     auto& descriptor = tx_descriptors[tx_current];
     ASSERT(length <= 8192);
-    auto* vptr = (void*)m_tx_buffers_regions[tx_current]->vaddr().as_ptr();
+    auto* vptr = (void*)m_tx_buffers_regions[tx_current].vaddr().as_ptr();
     memcpy(vptr, data, length);
     descriptor.length = length;
     descriptor.status = 0;
@@ -427,7 +431,7 @@ void E1000NetworkAdapter::receive()
         rx_current = (rx_current + 1) % number_of_rx_descriptors;
         if (!(rx_descriptors[rx_current].status & 1))
             break;
-        auto* buffer = m_rx_buffers_regions[rx_current]->vaddr().as_ptr();
+        auto* buffer = m_rx_buffers_regions[rx_current].vaddr().as_ptr();
         u16 length = rx_descriptors[rx_current].length;
 #ifdef E1000_DEBUG
         klog() << "E1000: Received 1 packet @ " << buffer << " (" << length << ") bytes!";
diff --git a/Kernel/Net/E1000NetworkAdapter.h b/Kernel/Net/E1000NetworkAdapter.h
index 2d95f58e9b..934f77e9cf 100644
--- a/Kernel/Net/E1000NetworkAdapter.h
+++ b/Kernel/Net/E1000NetworkAdapter.h
@@ -26,6 +26,7 @@
 
 #pragma once
 
+#include <AK/NonnullOwnPtrVector.h>
 #include
 #include
 #include
@@ -96,8 +97,8 @@ private:
     VirtualAddress m_mmio_base;
     OwnPtr<Region> m_rx_descriptors_region;
     OwnPtr<Region> m_tx_descriptors_region;
-    Vector<OwnPtr<Region>> m_rx_buffers_regions;
-    Vector<OwnPtr<Region>> m_tx_buffers_regions;
+    NonnullOwnPtrVector<Region> m_rx_buffers_regions;
+    NonnullOwnPtrVector<Region> m_tx_buffers_regions;
     OwnPtr<Region> m_mmio_region;
     u8 m_interrupt_line { 0 };
     bool m_has_eeprom { false };
diff --git a/Kernel/Net/RTL8139NetworkAdapter.cpp b/Kernel/Net/RTL8139NetworkAdapter.cpp
index 442591fb0a..45a6812499 100644
--- a/Kernel/Net/RTL8139NetworkAdapter.cpp
+++ b/Kernel/Net/RTL8139NetworkAdapter.cpp
@@ -158,11 +158,11 @@ RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address address, u8 irq)
     // we add space to account for overhang from the last packet - the rtl8139
     // can optionally guarantee that packets will be contiguous by
    // purposefully overrunning the rx buffer
-    klog() << "RTL8139: RX buffer: " << m_rx_buffer->vmobject().physical_pages()[0]->paddr();
+    klog() << "RTL8139: RX buffer: " << m_rx_buffer->physical_page(0)->paddr();
 
     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
         m_tx_buffers.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(TX_BUFFER_SIZE), "RTL8139 TX", Region::Access::Write | Region::Access::Read));
-        klog() << "RTL8139: TX buffer " << i << ": " << m_tx_buffers[i]->vmobject().physical_pages()[0]->paddr();
+        klog() << "RTL8139: TX buffer " << i << ": " << m_tx_buffers[i]->physical_page(0)->paddr();
     }
 
     reset();
@@ -250,7 +250,7 @@ void RTL8139NetworkAdapter::reset()
     // device might be in sleep mode, this will take it out
     out8(REG_CONFIG1, 0);
     // set up rx buffer
-    out32(REG_RXBUF, m_rx_buffer->vmobject().physical_pages()[0]->paddr().get());
+    out32(REG_RXBUF, m_rx_buffer->physical_page(0)->paddr().get());
     // reset missed packet counter
     out8(REG_MPC, 0);
     // "basic mode control register" options - 100mbit, full duplex, auto
@@ -268,7 +268,7 @@ void RTL8139NetworkAdapter::reset()
     out32(REG_TXCFG, TXCFG_TXRR_ZERO | TXCFG_MAX_DMA_1K | TXCFG_IFG11);
     // tell the chip where we want it to DMA from for outgoing packets.
     for (int i = 0; i < 4; i++)
-        out32(REG_TXADDR0 + (i * 4), m_tx_buffers[i]->vmobject().physical_pages()[0]->paddr().get());
+        out32(REG_TXADDR0 + (i * 4), m_tx_buffers[i]->physical_page(0)->paddr().get());
     // re-lock config registers
     out8(REG_CFG9346, CFG9346_NONE);
     // enable rx/tx again in case they got turned off (apparently some cards
diff --git a/Kernel/PCI/MMIOAccess.cpp b/Kernel/PCI/MMIOAccess.cpp
index dfc8f96de7..9ade94ba4e 100644
--- a/Kernel/PCI/MMIOAccess.cpp
+++ b/Kernel/PCI/MMIOAccess.cpp
@@ -141,7 +141,7 @@ void MMIOAccess::map_device(Address address)
     dbg() << "PCI: Mapping device @ pci (" << String::format("%w", address.seg()) << ":" << String::format("%b", address.bus()) << ":" << String::format("%b", address.slot()) << "." << String::format("%b", address.function()) << ")" << " V 0x" << String::format("%x", m_mmio_window_region->vaddr().get()) << " P 0x" << String::format("%x", device_physical_mmio_space.get());
 #endif
-    m_mmio_window_region->vmobject().physical_pages()[0] = PhysicalPage::create(device_physical_mmio_space, false, false);
+    m_mmio_window_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, false, false);
     m_mmio_window_region->remap();
     m_mapped_address = address;
 }
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 79104439a8..d092239140 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -131,7 +131,7 @@ bool Region::commit(size_t page_index)
 {
     ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
     InterruptDisabler disabler;
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
+    auto& vmobject_physical_page_entry = physical_page_slot(page_index);
     if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
         return true;
     auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
@@ -165,8 +165,8 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
-        if (physical_page && !physical_page->is_shared_zero_page())
+        auto* page = physical_page(i);
+        if (page && !page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -176,8 +176,8 @@ size_t Region::amount_shared() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
-        if (physical_page && physical_page->ref_count() > 1 && !physical_page->is_shared_zero_page())
+        auto* page = physical_page(i);
+        if (page && page->ref_count() > 1 && !page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -199,8 +199,8 @@ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefP
 
 bool Region::should_cow(size_t page_index) const
 {
-    auto& slot = vmobject().physical_pages()[page_index];
-    if (slot && slot->is_shared_zero_page())
+    auto* page = physical_page(page_index);
+    if (page && page->is_shared_zero_page())
         return true;
     if (m_shared)
         return false;
@@ -224,12 +224,12 @@ void Region::map_individual_page_impl(size_t page_index)
 {
     auto page_vaddr = vaddr().offset(page_index * PAGE_SIZE);
     auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
-    auto& physical_page = vmobject().physical_pages()[first_page_index() + page_index];
-    if (!physical_page || (!is_readable() && !is_writable())) {
+    auto* page = physical_page(page_index);
+    if (!page || (!is_readable() && !is_writable())) {
         pte.clear();
     } else {
         pte.set_cache_disabled(!m_cacheable);
-        pte.set_physical_page_base(physical_page->paddr().get());
+        pte.set_physical_page_base(page->paddr().get());
         pte.set_present(true);
         if (should_cow(page_index))
             pte.set_writable(false);
@@ -239,7 +239,7 @@ void Region::map_individual_page_impl(size_t page_index)
         pte.set_execute_disabled(!is_executable());
         pte.set_user_allowed(is_user_accessible());
 #ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
 #endif
     }
     MM.flush_tlb(page_vaddr);
@@ -249,7 +249,7 @@ void Region::remap_page(size_t page_index)
 {
     ASSERT(m_page_directory);
     InterruptDisabler disabler;
-    ASSERT(vmobject().physical_pages()[first_page_index() + page_index]);
+    ASSERT(physical_page(page_index));
     map_individual_page_impl(page_index);
 }
@@ -263,8 +263,8 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         pte.clear();
         MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
-        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
-        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", physical_page ? physical_page->paddr().get() : 0) << " <<";
+        auto* page = physical_page(i);
+        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
 #endif
     }
     if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
@@ -315,7 +315,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
         }
 #ifdef MAP_SHARED_ZERO_PAGE_LAZILY
         if (fault.is_read()) {
-            vmobject().physical_pages()[first_page_index() + page_index_in_region] = MM.shared_zero_page();
+            physical_page_slot(page_index_in_region) = MM.shared_zero_page();
             remap_page(page_index_in_region);
             return PageFaultResponse::Continue;
         }
@@ -330,7 +330,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
 #endif
-    if (vmobject().physical_pages()[first_page_index() + page_index_in_region]->is_shared_zero_page()) {
+    if (physical_page(page_index_in_region)->is_shared_zero_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
 #endif
@@ -351,9 +351,9 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
     cli();
 
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
+    auto& page_slot = physical_page_slot(page_index_in_region);
 
-    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page()) {
+    if (!page_slot.is_null() && !page_slot->is_shared_zero_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
Fine with me!"; #endif @@ -364,8 +364,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region) if (Thread::current) Thread::current->did_zero_fault(); - auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes); - if (physical_page.is_null()) { + auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes); + if (page.is_null()) { klog() << "MM: handle_zero_fault was unable to allocate a physical page"; return PageFaultResponse::ShouldCrash; } @@ -373,7 +373,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region) #ifdef PAGE_FAULT_DEBUG dbg() << " >> ZERO " << physical_page->paddr(); #endif - vmobject_physical_page_entry = move(physical_page); + page_slot = move(page); remap_page(page_index_in_region); return PageFaultResponse::Continue; } @@ -381,8 +381,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region) PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region) { ASSERT_INTERRUPTS_DISABLED(); - auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region]; - if (vmobject_physical_page_entry->ref_count() == 1) { + auto& page_slot = physical_page_slot(page_index_in_region); + if (page_slot->ref_count() == 1) { #ifdef PAGE_FAULT_DEBUG dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w"; #endif @@ -397,19 +397,19 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region) #ifdef PAGE_FAULT_DEBUG dbg() << " >> It's a COW page and it's time to COW!"; #endif - auto physical_page_to_copy = move(vmobject_physical_page_entry); - auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No); - if (physical_page.is_null()) { + auto physical_page_to_copy = move(page_slot); + auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No); + if (page.is_null()) { klog() << "MM: handle_cow_fault was unable to allocate a physical page"; return PageFaultResponse::ShouldCrash; } - u8* dest_ptr = MM.quickmap_page(*physical_page); + u8* dest_ptr = MM.quickmap_page(*page); const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); #ifdef PAGE_FAULT_DEBUG dbg() << " >> COW " << physical_page->paddr() << " <- " << physical_page_to_copy->paddr(); #endif copy_from_user(dest_ptr, src_ptr, PAGE_SIZE); - vmobject_physical_page_entry = move(physical_page); + page_slot = move(page); MM.unquickmap_page(); set_should_cow(page_index_in_region, false); remap_page(page_index_in_region); diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h index b71c8986e9..1d95ea8844 100644 --- a/Kernel/VM/Region.h +++ b/Kernel/VM/Region.h @@ -128,6 +128,18 @@ public: return size() / PAGE_SIZE; } + const PhysicalPage* physical_page(size_t index) const + { + ASSERT(index < page_count()); + return vmobject().physical_pages()[first_page_index() + index]; + } + + RefPtr& physical_page_slot(size_t index) + { + ASSERT(index < page_count()); + return vmobject().physical_pages()[first_page_index() + index]; + } + size_t offset_in_vmobject() const { return m_offset_in_vmobject;