mirror of
https://github.com/RGBCube/serenity
synced 2025-05-31 08:58:11 +00:00

The new PCI subsystem is initialized during runtime. PCI::Initializer is supposed to be called during early boot, to perform a few tests, and initialize the proper configuration space access mechanism. Kernel boot parameters can be specified by a user to determine what tests will occur, to aid debugging on problematic machines. After that, PCI::Initializer should be dismissed. PCI::IOAccess is a class that is derived from the PCI::Access class and implements the PCI configuration space access mechanism via x86 IO ports. PCI::MMIOAccess is a class that is derived from PCI::Access and implements the PCI configuration space access mechanism via memory access. The new PCI subsystem also supports determination of the IO/MMIO space needed by a device by checking a given BAR. In addition, every device or component that uses the PCI subsystem has been changed to match the latest changes.
215 lines
8 KiB
C++
215 lines
8 KiB
C++
#include <AK/Optional.h>
|
|
#include <Kernel/IO.h>
|
|
#include <Kernel/PCI/MMIOAccess.h>
|
|
#include <Kernel/VM/MemoryManager.h>
|
|
|
|
#define PCI_MMIO_CONFIG_SPACE_SIZE 4096
|
|
|
|
// Returns how many PCI segments (MCFG-described ECAM regions) were parsed.
uint32_t PCI::MMIOAccess::get_segments_count()
{
    auto segment_count = m_segments.size();
    return segment_count;
}
// Returns the first PCI bus number decoded by the given segment.
// Asserts that the segment number is known.
uint8_t PCI::MMIOAccess::get_segment_start_bus(u32 seg)
{
    auto segment = m_segments.get(seg);
    ASSERT(segment.has_value());
    return segment.value()->get_start_bus();
}
// Returns the last PCI bus number decoded by the given segment.
// Asserts that the segment number is known.
uint8_t PCI::MMIOAccess::get_segment_end_bus(u32 seg)
{
    auto segment = m_segments.get(seg);
    ASSERT(segment.has_value());
    return segment.value()->get_end_bus();
}
// Installs the MMIO-based configuration space access mechanism, unless some
// PCI::Access mechanism has already been initialized.
void PCI::MMIOAccess::initialize(ACPI_RAW::MCFG& mcfg)
{
    if (PCI::Access::is_initialized())
        return;
    new PCI::MMIOAccess(mcfg);
}
// Parses the ACPI MCFG table (given by its raw physical-memory image) and
// records one MMIOSegment per ECAM descriptor found in it.
// Fix: the boot log message misspelled "Configuration".
PCI::MMIOAccess::MMIOAccess(ACPI_RAW::MCFG& raw_mcfg)
    : m_mcfg(raw_mcfg)
    , m_segments(*new HashMap<u16, MMIOSegment*>()) // NOTE: intentionally leaked; this access object lives for the kernel's lifetime.
{
    kprintf("PCI: Using MMIO Mechanism for PCI Configuration Space Access\n");
    // Scratch region that map_device() re-points at a device's 4K configuration space.
    m_mmio_segment = MM.allocate_kernel_region(PAGE_ROUND_UP(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO", Region::Access::Read | Region::Access::Write);

    // First, map only two pages around the table so we can read its SDT header
    // and learn how large a mapping the full parse below actually needs.
    OwnPtr<Region> checkup_region = MM.allocate_kernel_region((PAGE_SIZE * 2), "PCI MCFG Checkup", Region::Access::Read | Region::Access::Write);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Checking MCFG Table length to choose the correct mapping size\n");
#endif
    // Map from the page-aligned base; re-add the in-page offset when dereferencing.
    mmap_region(*checkup_region, PhysicalAddress((u32)&raw_mcfg & 0xfffff000));
    ACPI_RAW::SDTHeader* sdt = (ACPI_RAW::SDTHeader*)(checkup_region->vaddr().get() + ((u32)&raw_mcfg & 0xfff));
    u32 length = sdt->length;
    u8 revision = sdt->revision;

    kprintf("PCI: MCFG, length - %u, revision %d\n", length, revision);
    checkup_region->unmap();

    // Now map the whole table; the extra page covers the in-page offset of the header.
    auto mcfg_region = MM.allocate_kernel_region(PAGE_ROUND_UP(length) + PAGE_SIZE, "PCI Parsing MCFG", Region::Access::Read | Region::Access::Write);
    mmap_region(*mcfg_region, PhysicalAddress((u32)&raw_mcfg & 0xfffff000));

    ACPI_RAW::MCFG& mcfg = *((ACPI_RAW::MCFG*)(mcfg_region->vaddr().get() + ((u32)&raw_mcfg & 0xfff)));
#ifdef PCI_DEBUG
    dbgprintf("PCI: Checking MCFG @ V 0x%x, P 0x%x\n", &mcfg, &raw_mcfg);
#endif

    // Everything after the fixed MCFG header is an array of ECAM descriptors,
    // each describing one PCI segment (base address + decoded bus range).
    for (u32 index = 0; index < ((mcfg.header.length - sizeof(ACPI_RAW::MCFG)) / sizeof(ACPI_RAW::PCI_MMIO_Descriptor)); index++) {
        u8 start_bus = mcfg.descriptors[index].start_pci_bus;
        u8 end_bus = mcfg.descriptors[index].end_pci_bus;
        u32 lower_addr = mcfg.descriptors[index].base_addr;

        m_segments.set(index, new PCI::MMIOSegment(PhysicalAddress(lower_addr), start_bus, end_bus));
        kprintf("PCI: New PCI segment @ P 0x%x, PCI buses (%d-%d)\n", lower_addr, start_bus, end_bus);
    }
    mcfg_region->unmap();
    kprintf("PCI: MMIO segments - %d\n", m_segments.size());
    // Prime the scratch mapping with the host bridge at 0:0:0.0.
    map_device(Address(0, 0, 0, 0));
}
// Re-points the kernel's scratch MMIO region at the 4K ECAM configuration
// space of the given PCI address. All field accessors call this first.
void PCI::MMIOAccess::map_device(Address address)
{
    // FIXME: Map and put some lock!
#ifdef PCI_DEBUG
    dbgprintf("PCI: Mapping Device @ pci (%d:%d:%d:%d)\n", address.seg(), address.bus(), address.slot(), address.function());
#endif
    ASSERT(m_segments.contains(address.seg()));
    auto segment = m_segments.get(address.seg());
    PhysicalAddress segment_base = segment.value()->get_paddr();
    // ECAM layout: one 4K configuration space per function, grouped by
    // function, then slot, then bus (relative to the segment's first bus).
    auto function_offset = PCI_MMIO_CONFIG_SPACE_SIZE * address.function();
    auto slot_offset = (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE) * address.slot();
    auto bus_offset = (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS) * (address.bus() - segment.value()->get_start_bus());
    PhysicalAddress device_physical_mmio_space = segment_base.offset(function_offset + slot_offset + bus_offset);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Mapping (%d:%d:%d:%d), V 0x%x, P 0x%x\n", address.seg(), address.bus(), address.slot(), address.function(), m_mmio_segment->vaddr().get(), device_physical_mmio_space.get());
#endif
    MM.map_for_kernel(m_mmio_segment->vaddr(), device_physical_mmio_space, false);
}
// Reads one byte from the configuration space of the given PCI address.
u8 PCI::MMIOAccess::read8_field(Address address, u32 field)
{
    ASSERT(field <= 0xfff);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Reading field %u, Address(%u:%u:%u:%u)\n", field, address.seg(), address.bus(), address.slot(), address.function());
#endif
    map_device(address);
    auto* config_space_entry = (u8*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    return *config_space_entry;
}
// Reads a 16-bit value from the configuration space of the given PCI address.
u16 PCI::MMIOAccess::read16_field(Address address, u32 field)
{
    ASSERT(field < 0xfff);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Reading field %u, Address(%u:%u:%u:%u)\n", field, address.seg(), address.bus(), address.slot(), address.function());
#endif
    map_device(address);
    auto* config_space_entry = (u16*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    return *config_space_entry;
}
// Reads a 32-bit value from the configuration space of the given PCI address.
u32 PCI::MMIOAccess::read32_field(Address address, u32 field)
{
    ASSERT(field <= 0xffc);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Reading field %u, Address(%u:%u:%u:%u)\n", field, address.seg(), address.bus(), address.slot(), address.function());
#endif
    map_device(address);
    auto* config_space_entry = (u32*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    return *config_space_entry;
}
// Writes one byte into the configuration space of the given PCI address.
void PCI::MMIOAccess::write8_field(Address address, u32 field, u8 value)
{
    ASSERT(field <= 0xfff);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Write to field %u, Address(%u:%u:%u:%u), value 0x%x\n", field, address.seg(), address.bus(), address.slot(), address.function(), value);
#endif
    map_device(address);
    auto* config_space_entry = (u8*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    *config_space_entry = value;
}
// Writes a 16-bit value into the configuration space of the given PCI address.
void PCI::MMIOAccess::write16_field(Address address, u32 field, u16 value)
{
    ASSERT(field < 0xfff);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Write to field %u, Address(%u:%u:%u:%u), value 0x%x\n", field, address.seg(), address.bus(), address.slot(), address.function(), value);
#endif
    map_device(address);
    auto* config_space_entry = (u16*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    *config_space_entry = value;
}
// Writes a 32-bit value into the configuration space of the given PCI address.
void PCI::MMIOAccess::write32_field(Address address, u32 field, u32 value)
{
    ASSERT(field <= 0xffc);
#ifdef PCI_DEBUG
    dbgprintf("PCI: Write to field %u, Address(%u:%u:%u:%u), value 0x%x\n", field, address.seg(), address.bus(), address.slot(), address.function(), value);
#endif
    map_device(address);
    auto* config_space_entry = (u32*)(m_mmio_segment->vaddr().get() + (field & 0xfff));
    *config_space_entry = value;
}
// Walks every segment and enumerates the PCI buses behind its host
// controller(s), invoking the callback for each discovered device.
void PCI::MMIOAccess::enumerate_all(Function<void(Address, ID)>& callback)
{
    for (u16 seg = 0; seg < m_segments.size(); seg++) {
#ifdef PCI_DEBUG
        dbgprintf("PCI: Enumerating Memory mapped IO segment %u\n", seg);
#endif
        bool single_host_controller = (read8_field(Address(seg), PCI_HEADER_TYPE) & 0x80) == 0;
        if (single_host_controller) {
            // Single PCI host controller.
            enumerate_bus(-1, 0, callback);
            return;
        }

        // Multiple PCI host controllers: each function of device 0:0 is one.
        for (u8 function = 0; function < 8; ++function) {
            if (read16_field(Address(seg, 0, 0, function), PCI_VENDOR_ID) == PCI_NONE)
                break;
            enumerate_bus(-1, function, callback);
        }
    }
}
// Maps `length` bytes of physical memory at `paddr` to `vaddr`, one page at a
// time, for kernel use.
void PCI::MMIOAccess::mmap(VirtualAddress vaddr, PhysicalAddress paddr, u32 length)
{
    u32 offset = 0;
    for (; length >= PAGE_SIZE; length -= PAGE_SIZE, offset += PAGE_SIZE) {
        MM.map_for_kernel(VirtualAddress(vaddr.offset(offset).get()), PhysicalAddress(paddr.offset(offset).get()));
#ifdef ACPI_DEBUG
        dbgprintf("PCI: map - V 0x%x -> P 0x%x\n", vaddr.offset(offset).get(), paddr.offset(offset).get());
#endif
    }
    // Map one final page for any sub-page remainder.
    if (length > 0)
        MM.map_for_kernel(vaddr.offset(offset), paddr.offset(offset), true);
#ifdef ACPI_DEBUG
    dbgprintf("PCI: Finished mapping\n");
#endif
}
// Maps physical memory starting at `paddr` over the whole of `region`.
void PCI::MMIOAccess::mmap_region(Region& region, PhysicalAddress paddr)
{
#ifdef PCI_DEBUG
    dbgprintf("PCI: Mapping region, size - %u\n", region.size());
#endif
    auto region_size = region.size();
    mmap(region.vaddr(), paddr, region_size);
}
// A PCI ECAM segment: a base physical address plus the range of bus numbers
// that this segment decodes.
PCI::MMIOSegment::MMIOSegment(PhysicalAddress base_addr, u8 start_bus, u8 end_bus)
    : m_base_addr(base_addr)
    , m_start_bus(start_bus)
    , m_end_bus(end_bus)
{
}
// Returns the first bus number decoded by this segment.
u8 PCI::MMIOSegment::get_start_bus()
{
    return m_start_bus;
}
// Returns the last bus number decoded by this segment.
u8 PCI::MMIOSegment::get_end_bus()
{
    return m_end_bus;
}
// Returns the size in bytes of this segment's ECAM window.
// The MCFG end bus number is inclusive (PCI Firmware Specification), so the
// decoded bus count is (end - start + 1); the previous code omitted the +1
// and under-sized the window by one bus (and yielded 0 for single-bus segments).
size_t PCI::MMIOSegment::get_size()
{
    return (PCI_MMIO_CONFIG_SPACE_SIZE * PCI_MAX_FUNCTIONS_PER_DEVICE * PCI_MAX_DEVICES_PER_BUS * (get_end_bus() - get_start_bus() + 1));
}
// Returns the base physical address of this segment's ECAM window.
PhysicalAddress PCI::MMIOSegment::get_paddr()
{
    return m_base_addr;
}