Kernel: Move Kernel/Memory/ code into Kernel::Memory namespace

commit 93d98d4976
parent a1d7ebf85a

153 changed files with 473 additions and 467 deletions
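
The change repeated throughout the diff below is mechanical: the memory-management types and helpers (Region, PhysicalPage, AnonymousVMObject, page_round_up(), and so on) now live in the Kernel::Memory namespace, so call sites outside that namespace qualify them with Memory::. A few call sites also switch from spelling out MemoryManager::the() to the MM shorthand already used elsewhere in the diff. A minimal standalone sketch of the namespace move, using placeholder types rather than the real kernel classes:

// Sketch only: Region and its Access flags here are stand-ins, not the kernel's classes.
#include <cstdio>

namespace Kernel::Memory {

struct Region {
    enum Access {
        Read = 1,
        Write = 2,
    };
};

}

namespace Kernel {

void allocate_example()
{
    // Before this commit: Region::Access::Read | Region::Access::Write
    // After this commit:  Memory::Region::Access::Read | Memory::Region::Access::Write
    int access = Memory::Region::Access::Read | Memory::Region::Access::Write;
    std::printf("access flags: %d\n", access);
}

}

int main()
{
    Kernel::allocate_example();
    return 0;
}
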
@@ -57,7 +57,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
 {
     dmesgln("PCI: Using MMIO for PCI configuration space access");
 
-    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Region::Access::Read | Region::Access::Write);
+    auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Memory::Region::Access::Read | Memory::Region::Access::Write);
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG Table length to choose the correct mapping size");
     auto* sdt = (ACPI::Structures::SDTHeader*)checkup_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     u32 length = sdt->length;
@@ -66,7 +66,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
     checkup_region->unmap();
 
-    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Region::Access::Read | Region::Access::Write);
+    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::Read | Memory::Region::Access::Write);
 
     auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));
@@ -89,7 +89,7 @@ UNMAP_AFTER_INIT MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     // PCI::PhysicalID objects to the vector, because get_capabilities calls
     // PCI::read16 which will need this region to be mapped.
     u8 start_bus = m_segments.get(0).value().get_start_bus();
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Region::Access::Read | Region::Access::Write);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(0, start_bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::Read | Memory::Region::Access::Write);
     m_mapped_bus = start_bus;
     dbgln_if(PCI_DEBUG, "PCI: First PCI ECAM Mapped region for starting bus {} @ {} {}", start_bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
 
@@ -102,7 +102,7 @@ void MMIOAccess::map_bus_region(u32 segment, u8 bus)
     VERIFY(m_access_lock.is_locked());
     if (m_mapped_bus == bus)
         return;
-    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Region::Access::Read | Region::Access::Write);
+    m_mapped_region = MM.allocate_kernel_region(determine_memory_mapped_bus_region(segment, bus), MEMORY_RANGE_PER_BUS, "PCI ECAM", Memory::Region::Access::Read | Memory::Region::Access::Write);
     m_mapped_bus = bus;
     dbgln_if(PCI_DEBUG, "PCI: New PCI ECAM Mapped region for bus {} @ {} {}", bus, m_mapped_region->vaddr(), m_mapped_region->physical_page(0)->paddr());
 }
@@ -46,7 +46,7 @@ private:
     VirtualAddress get_device_configuration_space(Address address);
     SpinLock<u8> m_access_lock;
     u8 m_mapped_bus { 0 };
-    OwnPtr<Region> m_mapped_region;
+    OwnPtr<Memory::Region> m_mapped_region;
 
 protected:
     explicit MMIOAccess(PhysicalAddress mcfg);
@@ -18,12 +18,12 @@ namespace PCI {
 
 UNMAP_AFTER_INIT DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMapping(Address device_address, const MMIOAccess::MMIOSegment& mmio_segment)
     : m_device_address(device_address)
-    , m_mapped_region(MM.allocate_kernel_region(page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Region::Access::Read | Region::Access::Write).release_nonnull())
+    , m_mapped_region(MM.allocate_kernel_region(Memory::page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Memory::Region::Access::Read | Memory::Region::Access::Write).release_nonnull())
 {
     PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
     PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(
         PCI_MMIO_CONFIG_SPACE_SIZE * m_device_address.function() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * m_device_address.device() + (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS) * (m_device_address.bus() - mmio_segment.get_start_bus()));
-    m_mapped_region->physical_page_slot(0) = PhysicalPage::create(device_physical_mmio_space, MayReturnToFreeList::No);
+    m_mapped_region->physical_page_slot(0) = Memory::PhysicalPage::create(device_physical_mmio_space, Memory::MayReturnToFreeList::No);
     m_mapped_region->remap();
 }
 
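
The offset computed in the DeviceConfigurationSpaceMapping constructor above follows the ECAM layout: one configuration-space block per function, grouped by device and then by bus, relative to the segment's start bus. A rough standalone sketch of that arithmetic, assuming the conventional ECAM constants (4 KiB of configuration space per function, 8 functions per device, 32 devices per bus); the concrete values of the kernel's PCI_MMIO_CONFIG_SPACE_SIZE, PCI_MAX_FUNCTIONS_PER_DEVICE and PCI_MAX_DEVICES_PER_BUS are an assumption here, not taken from this diff:

// Illustrative constants; assumed to match the kernel's definitions, not copied from them.
#include <cstdint>
#include <cstdio>

constexpr uint64_t config_space_size = 4096;  // bytes of config space per function
constexpr uint64_t functions_per_device = 8;
constexpr uint64_t devices_per_bus = 32;

constexpr uint64_t ecam_offset(uint64_t bus, uint64_t device, uint64_t function, uint64_t start_bus)
{
    // Mirrors the expression in the constructor: function stride, then device stride, then bus stride.
    return config_space_size * function
        + (config_space_size * functions_per_device) * device
        + (config_space_size * functions_per_device * devices_per_bus) * (bus - start_bus);
}

int main()
{
    // Bus 1, device 2, function 3 in a segment whose start bus is 0:
    // 4096 * 3 + 4096 * 8 * 2 + 4096 * 8 * 32 * 1 = 12288 + 65536 + 1048576 = 1126400 bytes.
    std::printf("%llu\n", static_cast<unsigned long long>(ecam_offset(1, 2, 3, 0)));
    return 0;
}
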
@@ -30,7 +30,7 @@ public:
 
 private:
     Address m_device_address;
-    NonnullOwnPtr<Region> m_mapped_region;
+    NonnullOwnPtr<Memory::Region> m_mapped_region;
 };
 
 class WindowedMMIOAccess final : public MMIOAccess {
@@ -289,8 +289,8 @@ void UHCIController::reset()
     }
 
     // Let's allocate the physical page for the Frame List (which is 4KiB aligned)
-    auto framelist_vmobj = AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
-    m_framelist = MemoryManager::the().allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Region::Access::Write);
+    auto framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+    m_framelist = MM.allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
     dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
     dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
     write_sofmod(64); // 1mS frame time
@@ -311,8 +311,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
 {
     // Let's allocate memory for both the QH and TD pools
     // First the QH pool and all of the Interrupt QH's
-    auto qh_pool_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_qh_pool = MemoryManager::the().allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Region::Access::Write);
+    auto qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    m_qh_pool = MM.allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
     memset(m_qh_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE); // Zero out both pages
 
     // Let's populate our free qh list (so we have some we can allocate later on)
@@ -331,8 +331,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
     m_dummy_qh = allocate_queue_head();
 
     // Now the Transfer Descriptor pool
-    auto td_pool_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_td_pool = MemoryManager::the().allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Region::Access::Write);
+    auto td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    m_td_pool = MM.allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
     memset(m_td_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE);
 
     // Set up the Isochronous Transfer Descriptor list
@@ -90,9 +90,9 @@ private:
     QueueHead* m_bulk_qh;
     QueueHead* m_dummy_qh; // Needed for PIIX4 hack
 
-    OwnPtr<Region> m_framelist;
-    OwnPtr<Region> m_qh_pool;
-    OwnPtr<Region> m_td_pool;
+    OwnPtr<Memory::Region> m_framelist;
+    OwnPtr<Memory::Region> m_qh_pool;
+    OwnPtr<Memory::Region> m_td_pool;
 
     Array<RefPtr<USB::Device>, 2> m_devices; // Devices connected to the root ports (of which there are two)
 };
@@ -11,20 +11,20 @@ namespace Kernel::USB {
 
 RefPtr<Transfer> Transfer::try_create(Pipe& pipe, u16 len)
 {
-    auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+    auto vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
     if (!vmobject)
         return nullptr;
 
     return AK::try_create<Transfer>(pipe, len, *vmobject);
 }
 
-Transfer::Transfer(Pipe& pipe, u16 len, AnonymousVMObject& vmobject)
+Transfer::Transfer(Pipe& pipe, u16 len, Memory::AnonymousVMObject& vmobject)
     : m_pipe(pipe)
     , m_transfer_data_size(len)
 {
     // Initialize data buffer for transfer
     // This will definitely need to be refactored in the future, I doubt this will scale well...
-    m_data_buffer = MemoryManager::the().allocate_kernel_region_with_vmobject(vmobject, PAGE_SIZE, "USB Transfer Buffer", Region::Access::Read | Region::Access::Write);
+    m_data_buffer = MM.allocate_kernel_region_with_vmobject(vmobject, PAGE_SIZE, "USB Transfer Buffer", Memory::Region::Access::Read | Memory::Region::Access::Write);
 }
 
 Transfer::~Transfer()
@@ -23,7 +23,7 @@ public:
 
 public:
     Transfer() = delete;
-    Transfer(Pipe& pipe, u16 len, AnonymousVMObject&);
+    Transfer(Pipe& pipe, u16 len, Memory::AnonymousVMObject&);
     ~Transfer();
 
     void set_setup_packet(const USBRequestData& request);
@@ -41,11 +41,11 @@ public:
     bool error_occurred() const { return m_error_occurred; }
 
 private:
-    Pipe& m_pipe;                    // Pipe that initiated this transfer
-    USBRequestData m_request;        // USB request
-    OwnPtr<Region> m_data_buffer;    // DMA Data buffer for transaction
-    u16 m_transfer_data_size { 0 };  // Size of the transfer's data stage
-    bool m_complete { false };       // Has this transfer been completed?
-    bool m_error_occurred { false }; // Did an error occur during this transfer?
+    Pipe& m_pipe;                         // Pipe that initiated this transfer
+    USBRequestData m_request;             // USB request
+    OwnPtr<Memory::Region> m_data_buffer; // DMA Data buffer for transaction
+    u16 m_transfer_data_size { 0 };       // Size of the transfer's data stage
+    bool m_complete { false };            // Has this transfer been completed?
+    bool m_error_occurred { false };      // Did an error occur during this transfer?
 };
 
 }