1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 01:17:35 +00:00

Kernel: Move x86-specific IRQ controller code to Arch/x86 directory

The PIC and APIC code are specific to x86 platforms, so move them out of
the general Interrupts directory to Arch/x86/common/Interrupts directory
instead.
This commit is contained in:
Liav A 2022-09-03 19:52:14 +03:00 committed by Linus Groh
parent aeef1c52bc
commit 1b7b360ca1
16 changed files with 19 additions and 19 deletions

View file

@ -11,9 +11,9 @@
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/IRQController.h>
#include <Kernel/Arch/x86/common/Interrupts/IOAPIC.h>
#include <Kernel/Firmware/ACPI/Definitions.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Interrupts/IOAPIC.h>
#include <Kernel/Library/LockRefPtr.h>
namespace Kernel {

View file

@ -9,11 +9,11 @@
#include <Kernel/Arch/InterruptDisabler.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/x86/InterruptManagement.h>
#include <Kernel/Arch/x86/common/Interrupts/APIC.h>
#include <Kernel/Arch/x86/common/Interrupts/IOAPIC.h>
#include <Kernel/Arch/x86/common/Interrupts/PIC.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Firmware/MultiProcessor/Parser.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/IOAPIC.h>
#include <Kernel/Interrupts/PIC.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Memory/TypedMapping.h>

View file

@ -9,8 +9,8 @@
#include <AK/Types.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/x86/common/Interrupts/PIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Interrupts/PIC.h>
#include <Kernel/Interrupts/SharedIRQHandler.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Interrupts/UnhandledInterruptHandler.h>

View file

@ -0,0 +1,675 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <AK/Types.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/x86/MSR.h>
#include <Kernel/Arch/x86/ProcessorInfo.h>
#include <Kernel/Arch/x86/common/Interrupts/APIC.h>
#include <Kernel/Debug.h>
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Panic.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/Time/APICTimer.h>
#define IRQ_APIC_TIMER (0xfc - IRQ_VECTOR_BASE)
#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE)
#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE)
#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE)
#define APIC_ICR_DELIVERY_PENDING (1 << 12)
#define APIC_ENABLED (1 << 8)
#define APIC_BASE_MSR 0x1b
#define APIC_REGS_MSR_BASE 0x800
#define APIC_REG_ID 0x20
#define APIC_REG_EOI 0xb0
#define APIC_REG_LD 0xd0
#define APIC_REG_DF 0xe0
#define APIC_REG_SIV 0xf0
#define APIC_REG_TPR 0x80
#define APIC_REG_ICR_LOW 0x300
#define APIC_REG_ICR_HIGH 0x310
#define APIC_REG_LVT_TIMER 0x320
#define APIC_REG_LVT_THERMAL 0x330
#define APIC_REG_LVT_PERFORMANCE_COUNTER 0x340
#define APIC_REG_LVT_LINT0 0x350
#define APIC_REG_LVT_LINT1 0x360
#define APIC_REG_LVT_ERR 0x370
#define APIC_REG_TIMER_INITIAL_COUNT 0x380
#define APIC_REG_TIMER_CURRENT_COUNT 0x390
#define APIC_REG_TIMER_CONFIGURATION 0x3e0
namespace Kernel {
static Singleton<APIC> s_apic;
// Interrupt handler for the fixed IPI vector used for SMP messaging
// (sent by APIC::broadcast_ipi() / APIC::send_ipi()).
class APICIPIInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICIPIInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICIPIInterruptHandler()
    {
    }

    // Registers a handler instance for the given vector; the instance is
    // kept alive for the kernel's lifetime (never freed here).
    static void initialize(u8 interrupt_number)
    {
        auto* handler = new APICIPIInterruptHandler(interrupt_number);
        handler->register_interrupt_handler();
    }

    virtual bool handle_interrupt(RegisterState const&) override;
    virtual bool eoi() override;

    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual StringView purpose() const override { return "IPI Handler"sv; }
    virtual StringView controller() const override { return {}; }

    // The IPI vector is never shared with devices.
    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }

private:
};
// Interrupt handler for the local APIC error vector (LVT ERR entry,
// see APIC::enable()).
class APICErrInterruptHandler final : public GenericInterruptHandler {
public:
    explicit APICErrInterruptHandler(u8 interrupt_vector)
        : GenericInterruptHandler(interrupt_vector, true)
    {
    }
    virtual ~APICErrInterruptHandler()
    {
    }

    // Registers a handler instance for the given vector; the instance is
    // kept alive for the kernel's lifetime (never freed here).
    static void initialize(u8 interrupt_number)
    {
        auto* handler = new APICErrInterruptHandler(interrupt_number);
        handler->register_interrupt_handler();
    }

    virtual bool handle_interrupt(RegisterState const&) override;
    virtual bool eoi() override;

    virtual HandlerType type() const override { return HandlerType::IRQHandler; }
    virtual StringView purpose() const override { return "SMP Error Handler"sv; }
    virtual StringView controller() const override { return {}; }

    // The error vector is never shared with devices.
    virtual size_t sharing_devices_count() const override { return 0; }
    virtual bool is_shared_handler() const override { return false; }
    virtual bool is_sharing_with_others() const override { return false; }

private:
};
// Returns whether the global APIC singleton has been constructed yet.
bool APIC::initialized()
{
    bool const instance_exists = s_apic.is_initialized();
    return instance_exists;
}
// Returns the global APIC instance. Must only be called after initialize().
APIC& APIC::the()
{
    VERIFY(APIC::initialized());
    auto& instance = *s_apic;
    return instance;
}
// Constructs the global APIC singleton; must be called exactly once.
UNMAP_AFTER_INIT void APIC::initialize()
{
    VERIFY(!APIC::initialized());
    s_apic.ensure_instance();
}
// Reads the APIC base physical address from the APIC base MSR
// (page-aligned; the low flag bits are masked off).
PhysicalAddress APIC::get_base()
{
    constexpr u64 base_address_mask = 0xfffff000;
    MSR apic_base_msr(APIC_BASE_MSR);
    auto raw_value = apic_base_msr.get();
    return PhysicalAddress(raw_value & base_address_mask);
}
// Writes the APIC base MSR, setting the global-enable bit (11) and,
// when in x2APIC mode, the x2APIC-enable bit (10) as well.
void APIC::set_base(PhysicalAddress const& base)
{
    MSR msr(APIC_BASE_MSR);
    u64 flags = 1 << 11;
    if (m_is_x2)
        flags |= 1 << 10;
    msr.set(base.get() | flags);
}
// Writes a local APIC register: via MSR in x2APIC mode, via MMIO otherwise.
void APIC::write_register(u32 offset, u32 value)
{
    if (m_is_x2) {
        // x2APIC MSRs are indexed by (MMIO offset / 16)
        MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
        msr.set(value);
    } else {
        // volatile MMIO write into the mapped xAPIC register page
        *reinterpret_cast<u32 volatile*>(m_apic_base->vaddr().offset(offset).as_ptr()) = value;
    }
}
// Reads a local APIC register: via MSR in x2APIC mode, via MMIO otherwise.
u32 APIC::read_register(u32 offset)
{
    if (m_is_x2) {
        // x2APIC MSRs are indexed by (MMIO offset / 16)
        MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
        return (u32)msr.get();
    }
    return *reinterpret_cast<u32 volatile*>(m_apic_base->vaddr().offset(offset).as_ptr());
}
// ORs the interrupt vector into an LVT register. Note this is a
// set-bits-only read-modify-write; existing bits are preserved.
void APIC::set_lvt(u32 offset, u8 interrupt)
{
    write_register(offset, read_register(offset) | interrupt);
}
// Sets the spurious interrupt vector and turns on the APIC software-enable
// bit (APIC_ENABLED) in the same register write.
void APIC::set_siv(u32 offset, u8 interrupt)
{
    write_register(offset, read_register(offset) | interrupt | APIC_ENABLED);
}
// Busy-waits until the delivery-pending bit in ICR_LOW clears, i.e. a
// previously issued interrupt command has been accepted.
void APIC::wait_for_pending_icr()
{
    while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) {
        microseconds_delay(200);
    }
}
// Issues an interrupt command. In x2APIC mode the ICR is a single 64-bit
// MSR write; in xAPIC mode the high dword must be written first, since
// the write to ICR_LOW is what triggers the command.
void APIC::write_icr(ICRReg const& icr)
{
    if (m_is_x2) {
        MSR msr(APIC_REGS_MSR_BASE + (APIC_REG_ICR_LOW >> 4));
        msr.set(icr.x2_value());
    } else {
        write_register(APIC_REG_ICR_HIGH, icr.x_high());
        write_register(APIC_REG_ICR_LOW, icr.x_low());
    }
}
#define APIC_LVT_TIMER_ONESHOT 0
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE (1 << 18)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_TRIGGER_LEVEL (1 << 14)
#define APIC_LVT(iv, dm) (((iv)&0xff) | (((dm)&0x7) << 8))
extern "C" void apic_ap_start(void);
extern "C" u16 apic_ap_start_size;
extern "C" FlatPtr ap_cpu_init_stacks;
extern "C" FlatPtr ap_cpu_init_processor_info_array;
extern "C" u32 ap_cpu_init_cr0;
extern "C" FlatPtr ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" FlatPtr ap_cpu_gdtr;
extern "C" FlatPtr ap_cpu_idtr;
#if ARCH(X86_64)
extern "C" FlatPtr ap_cpu_kernel_map_base;
extern "C" FlatPtr ap_cpu_kernel_entry_function;
#endif
extern "C" [[noreturn]] void init_ap(FlatPtr, Processor*);
// Signals end-of-interrupt to the local APIC.
void APIC::eoi()
{
    write_register(APIC_REG_EOI, 0x0);
}
// Returns the spurious interrupt vector number, relative to IRQ_VECTOR_BASE.
u8 APIC::spurious_interrupt_vector()
{
    u8 const spurious_vector = IRQ_APIC_SPURIOUS;
    return spurious_vector;
}
#define APIC_INIT_VAR_PTR(tpe, vaddr, varname) \
reinterpret_cast<tpe volatile*>(reinterpret_cast<ptrdiff_t>(vaddr) \
+ reinterpret_cast<ptrdiff_t>(&varname) \
- reinterpret_cast<ptrdiff_t>(&apic_ap_start))
// One-time BSP initialization: detects (x2)APIC support via CPUID, maps
// the register page in xAPIC mode, and counts processors from the ACPI
// MADT. Returns false if a usable local APIC could not be set up.
UNMAP_AFTER_INIT bool APIC::init_bsp()
{
    // FIXME: Use the ACPI MADT table
    if (!MSR::have())
        return false;
    // check if we support local apic
    CPUID id(1);
    if ((id.edx() & (1 << 9)) == 0)
        return false;
    // ECX bit 21 advertises x2APIC support
    if (id.ecx() & (1 << 21))
        m_is_x2 = true;

    PhysicalAddress apic_base = get_base();
    dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2 ? "x2" : "x", apic_base);
    set_base(apic_base);

    if (!m_is_x2) {
        // xAPIC mode is MMIO-based, so map the register page
        auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
        if (region_or_error.is_error()) {
            dbgln("APIC: Failed to allocate memory for APIC base");
            return false;
        }
        m_apic_base = region_or_error.release_value();
    }

    auto rsdp = ACPI::StaticParsing::find_rsdp();
    if (!rsdp.has_value()) {
        dbgln("APIC: RSDP not found");
        return false;
    }
    auto madt_address = ACPI::StaticParsing::find_table(rsdp.value(), "APIC"sv);
    if (!madt_address.has_value()) {
        dbgln("APIC: MADT table not found");
        return false;
    }

    if (kernel_command_line().is_smp_enabled()) {
        auto madt_or_error = Memory::map_typed<ACPI::Structures::MADT>(madt_address.value());
        if (madt_or_error.is_error()) {
            dbgln("APIC: Failed to map MADT table");
            return false;
        }
        auto madt = madt_or_error.release_value();
        size_t entry_index = 0;
        size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
        auto* madt_entry = madt->entries;
        // Walk the variable-length MADT entries and count (enabled) processors
        while (entries_length > 0) {
            size_t entry_length = madt_entry->length;
            if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::LocalAPIC) {
                auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
                dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, xAPIC ID: {}, flags: {:#08x}", entry_index, plapic_entry->acpi_processor_id, plapic_entry->apic_id, plapic_entry->flags);
                m_processor_cnt++;
                // Flag bit 0 marks the processor as enabled
                if ((plapic_entry->flags & 0x1) != 0)
                    m_processor_enabled_cnt++;
            } else if (madt_entry->type == (u8)ACPI::Structures::MADTEntryType::Local_x2APIC) {
                // Only used for APID IDs >= 255
                auto* plx2apic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalX2APIC*)madt_entry;
                dbgln_if(APIC_DEBUG, "APIC: AP found @ MADT entry {}, processor ID: {}, x2APIC ID: {}, flags: {:#08x}", entry_index, plx2apic_entry->acpi_processor_id, plx2apic_entry->apic_id, plx2apic_entry->flags);
                m_processor_cnt++;
                if ((plx2apic_entry->flags & 0x1) != 0)
                    m_processor_enabled_cnt++;
            }
            madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
            entries_length -= entry_length;
            entry_index++;
        }
        dbgln("APIC processors found: {}, enabled: {}", m_processor_cnt, m_processor_enabled_cnt);
    }

    // If the MADT scan was skipped or found nothing, assume one processor
    if (m_processor_enabled_cnt < 1)
        m_processor_enabled_cnt = 1;
    if (m_processor_cnt < 1)
        m_processor_cnt = 1;

    enable(0);
    return true;
}
// Prepares everything the APs need to start: copies the real-mode startup
// trampoline to physical 0x8000, allocates temporary boot stacks and
// Processor structures, and patches the trampoline's variables (stacks,
// CR0/CR3/CR4, GDT/IDT pointers, kernel entry function).
UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
{
    VERIFY(!m_ap_boot_environment);
    VERIFY(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Copy the APIC startup code and variables to P0x00008000
    // Also account for the data appended to:
    // * aps_to_enable u32 values for ap_cpu_init_stacks
    // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
    constexpr u64 apic_startup_region_base = 0x8000;
    auto apic_startup_region_size = Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(FlatPtr))).release_value_but_fixme_should_propagate_errors();
    VERIFY(apic_startup_region_size < USER_RANGE_BASE);
    auto apic_startup_region = MUST(MM.create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
    u8* apic_startup_region_ptr = apic_startup_region->vaddr().as_ptr();
    memcpy(apic_startup_region_ptr, reinterpret_cast<void const*>(apic_ap_start), apic_ap_start_size);

    // Allocate enough stacks for all APs
    m_ap_temporary_boot_stacks.ensure_capacity(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++) {
        auto stack_region_or_error = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow);
        if (stack_region_or_error.is_error()) {
            dbgln("APIC: Failed to allocate stack for AP #{}", i);
            return;
        }
        auto stack_region = stack_region_or_error.release_value();
        stack_region->set_stack(true);
        m_ap_temporary_boot_stacks.unchecked_append(move(stack_region));
    }

    // Store pointers to all stacks for the APs to use
    auto* ap_stack_array = APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_stacks);
    VERIFY(aps_to_enable == m_ap_temporary_boot_stacks.size());
    for (size_t i = 0; i < aps_to_enable; i++) {
        // Stacks grow down, so hand each AP the top of its stack region
        ap_stack_array[i] = m_ap_temporary_boot_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] stack at {}", i + 1, VirtualAddress { ap_stack_array[i] });
    }

    // Allocate Processor structures for all APs and store the pointer to the data
    m_ap_processor_info.resize(aps_to_enable);
    for (size_t i = 0; i < aps_to_enable; i++)
        m_ap_processor_info[i] = adopt_nonnull_own_or_enomem(new (nothrow) Processor()).release_value_but_fixme_should_propagate_errors();
    // The processor-info pointer array lives right after the stack array
    // inside the trampoline region (see the size calculation above)
    auto* ap_processor_info_array = &ap_stack_array[aps_to_enable];
    for (size_t i = 0; i < aps_to_enable; i++) {
        ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
        dbgln_if(APIC_DEBUG, "APIC: CPU[{}] processor at {}", i + 1, VirtualAddress { ap_processor_info_array[i] });
    }
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_processor_info_array) = FlatPtr(&ap_processor_info_array[0]);

    // Store the BSP's CR3 value for the APs to use
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();

    // Store the BSP's GDT and IDT for the APs to use
    auto const& gdtr = Processor::current().get_gdtr();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_gdtr) = FlatPtr(&gdtr);
    auto const& idtr = get_idtr();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_idtr) = FlatPtr(&idtr);

#if ARCH(X86_64)
    // TODO: Use these also in i686 builds
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_map_base) = FlatPtr(kernel_mapping_base);
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_entry_function) = FlatPtr(&init_ap);
#endif

    // Store the BSP's CR0 and CR4 values for the APs to use
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr0) = read_cr0();
    *APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr4) = read_cr4();

    m_ap_boot_environment = move(apic_startup_region);
}
// Sends INIT plus two SIPIs (start at P8000) to all APs, waits for every
// AP to report in via init_finished(), then tears down the temporary
// boot environment.
UNMAP_AFTER_INIT void APIC::do_boot_aps()
{
    VERIFY(m_ap_boot_environment);
    VERIFY(m_processor_enabled_cnt > 1);
    u32 aps_to_enable = m_processor_enabled_cnt - 1;

    // Create an idle thread for each processor. We have to do this here
    // because we won't be able to send FlushTLB messages, so we have to
    // have all memory set up for the threads so that when the APs are
    // starting up, they can access all the memory properly
    m_ap_idle_threads.resize(aps_to_enable);
    for (u32 i = 0; i < aps_to_enable; i++)
        m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1);

    dbgln_if(APIC_DEBUG, "APIC: Starting {} AP(s)", aps_to_enable);

    // INIT
    write_icr({ 0, 0, ICRReg::INIT, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });
    microseconds_delay(10 * 1000);

    for (int i = 0; i < 2; i++) {
        // SIPI
        write_icr({ 0x08, 0, ICRReg::StartUp, ICRReg::Physical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf }); // start execution at P8000
        microseconds_delay(200);
    }

    // Now wait until the ap_cpu_init_pending variable dropped to 0, which means all APs are initialized and no longer need these special mappings
    if (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable) {
        dbgln_if(APIC_DEBUG, "APIC: Waiting for {} AP(s) to finish initialization...", aps_to_enable);
        do {
            // Wait a little bit
            microseconds_delay(200);
        } while (m_apic_ap_count.load(AK::MemoryOrder::memory_order_consume) != aps_to_enable);
    }

    dbgln_if(APIC_DEBUG, "APIC: {} processors are initialized and running", m_processor_enabled_cnt);

    // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
    // address range from leaking into the general virtual range allocator.
    m_ap_boot_environment->unmap();
    m_ap_boot_environment = nullptr;

    // When the APs signal that they finished their initialization they have already switched over to their
    // idle thread's stack, so the temporary boot stack can be deallocated
    m_ap_temporary_boot_stacks.clear();
}
// Boots all application processors (no-op on uniprocessor systems),
// enables SMP mode and then releases the APs to continue running.
UNMAP_AFTER_INIT void APIC::boot_aps()
{
    if (m_processor_enabled_cnt <= 1)
        return;

    // We split this into another call because do_boot_aps() will cause
    // MM calls upon exit, and we don't want to call smp_enable before that
    do_boot_aps();

    // Enable SMP, which means IPIs may now be sent
    Processor::smp_enable();

    dbgln_if(APIC_DEBUG, "All processors initialized and waiting, trigger all to continue");

    // Now trigger all APs to continue execution (need to do this after
    // the regions have been freed so that we don't trigger IPIs
    m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release);
}
// Enables the local APIC on the given CPU: assigns its (logical) APIC ID,
// registers the spurious/error/IPI vectors (BSP only), and programs the
// LVT entries. The timer LVT is left masked until setup_local_timer().
UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
{
    // In xAPIC flat mode the logical destination register only has room
    // for 8 CPUs (the ID lives in bits 24-31, one bit per CPU).
    VERIFY(m_is_x2 || cpu < 8);

    u32 apic_id;
    if (m_is_x2) {
        dbgln_if(APIC_DEBUG, "Enable x2APIC on CPU #{}", cpu);

        // We need to enable x2 mode on each core independently
        set_base(get_base());

        apic_id = read_register(APIC_REG_ID);
    } else {
        dbgln_if(APIC_DEBUG, "Setting logical xAPIC ID for CPU #{}", cpu);

        // Use the CPU# as logical apic id
        VERIFY(cpu < 8); // was `cpu <= 8`, inconsistent with the entry assertion above
        write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (cpu << 24));

        // read it back to make sure it's actually set
        apic_id = read_register(APIC_REG_LD) >> 24;
    }

    dbgln_if(APIC_DEBUG, "CPU #{} apic id: {}", cpu, apic_id);
    Processor::current().info().set_apic_id(apic_id);

    dbgln_if(APIC_DEBUG, "Enabling local APIC for CPU #{}, logical APIC ID: {}", cpu, apic_id);

    if (cpu == 0) {
        SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);

        // register SMP error interrupt vector
        APICErrInterruptHandler::initialize(IRQ_APIC_ERR);

        // register IPI interrupt vector
        APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
    }

    if (!m_is_x2) {
        // local destination mode (flat mode), not supported in x2 mode
        write_register(APIC_REG_DF, 0xf0000000);
    }

    // set error interrupt vector
    set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR);

    // set spurious interrupt vector
    set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS);

    // Mask the LVT entries we don't use yet
    write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
    write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);

    // Accept all interrupt priorities
    write_register(APIC_REG_TPR, 0);
}
// Returns the idle thread created for the given AP; index 0 of the
// vector corresponds to CPU #1 (the BSP is not stored here).
Thread* APIC::get_idle_thread(u32 cpu) const
{
    VERIFY(cpu > 0);
    return m_ap_idle_threads[cpu - 1];
}
// Called on each AP at the end of its bring-up: reports completion to the
// BSP (m_apic_ap_count), then spins until the BSP releases all APs via
// m_apic_ap_continue, and finally enables this AP's local APIC.
UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
{
    // This method is called once the boot stack is no longer needed
    VERIFY(cpu > 0);
    VERIFY(cpu < m_processor_enabled_cnt);
    // Since we're waiting on other APs here, we shouldn't have the
    // scheduler lock
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());

    // Notify the BSP that we are done initializing. It will unmap the startup data at P8000
    m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    dbgln_if(APIC_DEBUG, "APIC: CPU #{} initialized, waiting for all others", cpu);

    // The reason we're making all APs wait until the BSP signals them is that
    // we don't want APs to trigger IPIs (e.g. through MM) while the BSP
    // is unable to process them
    while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) {
        microseconds_delay(200);
    }

    dbgln_if(APIC_DEBUG, "APIC: CPU #{} continues, all others are initialized", cpu);

    // do_boot_aps() freed memory, so we need to update our tlb
    Processor::flush_entire_tlb_local();

    // Now enable all the interrupts
    APIC::the().enable(cpu);
}
// Sends the IPI vector to every processor except the caller
// (logical destination 0xffffffff, AllExcludingSelf shorthand).
void APIC::broadcast_ipi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Broadcast IPI from CPU #{}", Processor::current_id());
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, 0xffffffff, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf });
}
// Sends the IPI vector to a single remote processor. In x2APIC mode the
// physical APIC ID is used as the destination; in xAPIC mode the CPU
// number doubles as the logical APIC ID (see enable()).
void APIC::send_ipi(u32 cpu)
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: Send IPI from CPU #{} to CPU #{}", Processor::current_id(), cpu);
    VERIFY(cpu != Processor::current_id());
    VERIFY(cpu < Processor::count());
    wait_for_pending_icr();
    write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2 ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2 ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand });
}
// Creates and calibrates the local APIC timer on the BSP. Returns null
// when no APIC register access is available (no MMIO mapping and not x2).
UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer)
{
    if (!m_apic_base && !m_is_x2)
        return nullptr;

    // We should only initialize and calibrate the APIC timer once on the BSP!
    VERIFY(Processor::is_bootstrap_processor());
    VERIFY(!m_apic_timer);

    m_apic_timer = APICTimer::initialize(IRQ_APIC_TIMER, calibration_timer);
    return m_apic_timer;
}
// Programs the local APIC timer LVT entry and divide configuration.
// `ticks` is only loaded as the initial count in periodic mode; one-shot
// and TSC-deadline callers manage the count themselves.
void APIC::setup_local_timer(u32 ticks, TimerMode timer_mode, bool enable)
{
    u32 flags = 0;
    switch (timer_mode) {
    case TimerMode::OneShot:
        flags |= APIC_LVT_TIMER_ONESHOT;
        break;
    case TimerMode::Periodic:
        flags |= APIC_LVT_TIMER_PERIODIC;
        break;
    case TimerMode::TSCDeadline:
        flags |= APIC_LVT_TIMER_TSCDEADLINE;
        break;
    }
    if (!enable)
        flags |= APIC_LVT_MASKED;
    write_register(APIC_REG_LVT_TIMER, APIC_LVT(IRQ_APIC_TIMER + IRQ_VECTOR_BASE, 0) | flags);

    u32 config = read_register(APIC_REG_TIMER_CONFIGURATION);
    config &= ~0xf; // clear divisor (bits 0-3)
    // Translate the divisor into the hardware's divide-configuration
    // encoding (a non-contiguous 4-bit field)
    switch (get_timer_divisor()) {
    case 1:
        config |= (1 << 3) | 3;
        break;
    case 2:
        break; // 0b0000 encodes divide-by-2
    case 4:
        config |= 1;
        break;
    case 8:
        config |= 2;
        break;
    case 16:
        config |= 3;
        break;
    case 32:
        config |= (1 << 3);
        break;
    case 64:
        config |= (1 << 3) | 1;
        break;
    case 128:
        config |= (1 << 3) | 2;
        break;
    default:
        VERIFY_NOT_REACHED();
    }
    write_register(APIC_REG_TIMER_CONFIGURATION, config);

    if (timer_mode == TimerMode::Periodic)
        write_register(APIC_REG_TIMER_INITIAL_COUNT, ticks / get_timer_divisor());
}
// Reads the local APIC timer's current-count register.
u32 APIC::get_timer_current_count()
{
    auto const current_count = read_register(APIC_REG_TIMER_CURRENT_COUNT);
    return current_count;
}
// The timer always runs with a fixed divide-by-16 configuration
// (see setup_local_timer()).
u32 APIC::get_timer_divisor()
{
    constexpr u32 fixed_timer_divisor = 16;
    return fixed_timer_divisor;
}
// The IPI itself carries no payload; the wakeup is the whole message.
bool APICIPIInterruptHandler::handle_interrupt(RegisterState const&)
{
    dbgln_if(APIC_SMP_DEBUG, "APIC IPI on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the IPI at the local APIC rather than an IRQ controller.
bool APICIPIInterruptHandler::eoi()
{
    dbgln_if(APIC_SMP_DEBUG, "SMP: IPI EOI");
    APIC::the().eoi();
    return true;
}
// Local APIC error interrupt; currently only logged.
bool APICErrInterruptHandler::handle_interrupt(RegisterState const&)
{
    dbgln("APIC: SMP error on CPU #{}", Processor::current_id());
    return true;
}
// Acknowledge the error interrupt at the local APIC.
bool APICErrInterruptHandler::eoi()
{
    APIC::the().eoi();
    return true;
}
// EOI hook for hardware timers driven directly by the local APIC
// (e.g. the APIC timer): acknowledge at the LAPIC, not an IRQ controller.
bool HardwareTimer<GenericInterruptHandler>::eoi()
{
    APIC::the().eoi();
    return true;
}
}

View file

@ -0,0 +1,115 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Time/HardwareTimer.h>
namespace Kernel {
class APICTimer;
// Minimal descriptor for a local APIC; only carries the APIC ID.
struct LocalAPIC {
    u32 apic_id;
};
// Driver for the local APIC (xAPIC via MMIO, or x2APIC via MSRs),
// including SMP bring-up of the application processors (APs) and the
// local APIC timer.
class APIC {
public:
    static APIC& the();
    static void initialize();
    static bool initialized();

    // BSP-only one-time hardware setup; returns false when no usable APIC.
    bool init_bsp();
    // Signals end-of-interrupt to the local APIC.
    void eoi();
    // Prepares the low-memory trampoline, stacks etc. used to start the APs.
    void setup_ap_boot_environment();
    void boot_aps();
    // Per-CPU local APIC enable (vector registration happens for cpu == 0).
    void enable(u32 cpu);
    // Called by each AP once its bring-up no longer needs the boot data.
    void init_finished(u32 cpu);
    void broadcast_ipi();
    void send_ipi(u32 cpu);
    static u8 spurious_interrupt_vector();
    Thread* get_idle_thread(u32 cpu) const;
    u32 enabled_processor_count() const { return m_processor_enabled_cnt; }

    APICTimer* initialize_timers(HardwareTimerBase&);
    APICTimer* get_timer() const { return m_apic_timer; }
    enum class TimerMode {
        OneShot,
        Periodic,
        TSCDeadline
    };
    void setup_local_timer(u32, TimerMode, bool);
    u32 get_timer_current_count();
    u32 get_timer_divisor();

private:
    // In-memory representation of an interrupt command register (ICR) write.
    struct ICRReg {
        enum DeliveryMode {
            Fixed = 0x0,
            LowPriority = 0x1,
            SMI = 0x2,
            NMI = 0x4,
            INIT = 0x5,
            StartUp = 0x6,
        };
        enum DestinationMode {
            Physical = 0x0,
            Logical = 0x1,
        };
        enum Level {
            DeAssert = 0x0,
            Assert = 0x1
        };
        enum class TriggerMode {
            Edge = 0x0,
            Level = 0x1,
        };
        enum DestinationShorthand {
            NoShorthand = 0x0,
            Self = 0x1,
            AllIncludingSelf = 0x2,
            AllExcludingSelf = 0x3,
        };

        u8 vector { 0 };
        u32 destination { 0 };
        DeliveryMode delivery_mode { DeliveryMode::Fixed };
        DestinationMode destination_mode { DestinationMode::Physical };
        Level level { Level::DeAssert };
        TriggerMode trigger_mode { TriggerMode::Edge };
        DestinationShorthand destination_short { DestinationShorthand::NoShorthand };

        // Encodings of the xAPIC low/high ICR dwords and the combined
        // 64-bit x2APIC ICR value.
        u32 x_low() const { return (u32)vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destination_short << 18); }
        u32 x_high() const { return destination << 24; }
        u64 x2_value() const { return ((u64)destination << 32) | x_low(); }
    };

    OwnPtr<Memory::Region> m_apic_base;                        // MMIO register mapping (xAPIC mode only)
    Vector<OwnPtr<Processor>> m_ap_processor_info;
    Vector<OwnPtr<Memory::Region>> m_ap_temporary_boot_stacks; // freed once the APs are up
    Vector<Thread*> m_ap_idle_threads;
    OwnPtr<Memory::Region> m_ap_boot_environment;              // identity-mapped trampoline at P0x8000
    Atomic<u8> m_apic_ap_count { 0 };                          // number of APs that finished init
    Atomic<u8> m_apic_ap_continue { 0 };                       // BSP sets this to release waiting APs
    u32 m_processor_cnt { 0 };
    u32 m_processor_enabled_cnt { 0 };
    APICTimer* m_apic_timer { nullptr };
    bool m_is_x2 { false };

    static PhysicalAddress get_base();
    void set_base(PhysicalAddress const& base);
    void write_register(u32 offset, u32 value);
    u32 read_register(u32 offset);
    void set_lvt(u32 offset, u8 interrupt);
    void set_siv(u32 offset, u8 interrupt);
    void wait_for_pending_icr();
    void write_icr(ICRReg const& icr);
    void do_boot_aps();
};
}

View file

@ -0,0 +1,311 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Optional.h>
#include <Kernel/Arch/InterruptDisabler.h>
#include <Kernel/Arch/x86/InterruptManagement.h>
#include <Kernel/Arch/x86/common/Interrupts/APIC.h>
#include <Kernel/Arch/x86/common/Interrupts/IOAPIC.h>
#include <Kernel/Debug.h>
#include <Kernel/Sections.h>
#define IOAPIC_REDIRECTION_ENTRY_OFFSET 0x10
namespace Kernel {
// I/O APIC redirection-entry delivery modes (bits 8-10 of the low dword;
// see configure_redirection_entry()).
enum DeliveryMode {
    Normal = 0,
    LowPriority = 1,
    SMI = 2,
    NMI = 3,
    INIT = 4,
    External = 7
};
// Maps the I/O APIC MMIO window, reads its ID (register 0x0) and its
// version plus redirection entry count (register 0x1), then masks every
// redirection entry until it is explicitly configured.
UNMAP_AFTER_INIT IOAPIC::IOAPIC(PhysicalAddress address, u32 gsi_base)
    : m_address(address)
    , m_regs(Memory::map_typed_writable<ioapic_mmio_regs>(m_address).release_value_but_fixme_should_propagate_errors())
    , m_gsi_base(gsi_base)
    , m_id((read_register(0x0) >> 24) & 0xFF)
    , m_version(read_register(0x1) & 0xFF)
    , m_redirection_entries_count((read_register(0x1) >> 16) + 1)
{
    InterruptDisabler disabler;
    dmesgln("IOAPIC ID: {:#x}", m_id);
    dmesgln("IOAPIC Version: {:#x}, redirection entries: {}", m_version, m_redirection_entries_count);
    dmesgln("IOAPIC Arbitration ID {:#x}", read_register(0x2));
    mask_all_redirection_entries();
}
// Intentionally empty: all hardware setup happens in the constructor.
UNMAP_AFTER_INIT void IOAPIC::initialize()
{
}
// Maps an ISA interrupt through this I/O APIC. If the ACPI MADT provides
// an interrupt source override for the vector, its polarity/trigger flags
// are honored; otherwise the vector is identity-mapped. The new entry is
// created masked.
void IOAPIC::map_interrupt_redirection(u8 interrupt_vector)
{
    InterruptDisabler disabler;

    // Decode the MPS INTI polarity flags (bits 0-1); see ACPI spec
    // Version 6.2, page 205. 0b00 = bus default, 0b01 = active high,
    // 0b11 = active low; 0b10 is reserved.
    auto decode_active_low = [](auto flags) -> bool {
        switch (flags & 0b11) {
        case 0:
        case 1:
            return false;
        case 3:
            return true;
        default:
            VERIFY_NOT_REACHED(); // 2 is a reserved value
        }
    };
    // Decode the MPS INTI trigger-mode flags (bits 2-3), same encoding
    // with 0b11 meaning level-triggered.
    auto decode_trigger_level_mode = [](auto flags) -> bool {
        switch ((flags >> 2) & 0b11) {
        case 0:
        case 1:
            return false;
        case 3:
            return true;
        default:
            VERIFY_NOT_REACHED(); // 2 is a reserved value
        }
    };

    for (auto redirection_override : InterruptManagement::the().isa_overrides()) {
        if (redirection_override.source() != interrupt_vector)
            continue;
        bool active_low = decode_active_low(redirection_override.flags());
        bool trigger_level_mode = decode_trigger_level_mode(redirection_override.flags());
        configure_redirection_entry(redirection_override.gsi() - gsi_base(), InterruptManagement::acquire_mapped_interrupt_number(redirection_override.source()) + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, active_low, trigger_level_mode, true, 0);
        return;
    }
    // No override found; fall back to a 1:1 ISA mapping.
    isa_identity_map(interrupt_vector);
}
// Maps an ISA IRQ 1:1 onto the redirection entry of the same index
// (active high, edge triggered); the entry is created masked.
void IOAPIC::isa_identity_map(int index)
{
    InterruptDisabler disabler;
    configure_redirection_entry(index, InterruptManagement::acquire_mapped_interrupt_number(index) + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, false, false, true, 0);
}
// Maps PCI interrupts: currently hard-codes IRQ 11 (level-triggered,
// masked). NOTE(review): other PCI interrupt lines are not configured
// here — presumably routing is refined elsewhere; confirm against callers.
void IOAPIC::map_pci_interrupts()
{
    InterruptDisabler disabler;
    configure_redirection_entry(11, 11 + IRQ_VECTOR_BASE, DeliveryMode::Normal, false, false, true, true, 0);
}
// The controller counts as enabled unless it has been hard-disabled.
bool IOAPIC::is_enabled() const
{
    if (is_hard_disabled())
        return false;
    return true;
}
// Spurious interrupts need no hardware acknowledgment; just sanity-check
// the handler and log.
void IOAPIC::spurious_eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
    VERIFY(handler.interrupt_number() == APIC::spurious_interrupt_vector());
    dbgln("IOAPIC: Spurious interrupt");
}
void IOAPIC::map_isa_interrupts()
{
InterruptDisabler disabler;
for (auto redirection_override : InterruptManagement::the().isa_overrides()) {
if ((redirection_override.gsi() < gsi_base()) || (redirection_override.gsi() >= (gsi_base() + m_redirection_entries_count)))
continue;
bool active_low = false;
// See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
switch ((redirection_override.flags() & 0b11)) {
case 0:
active_low = false;
break;
case 1:
active_low = false;
break;
case 2:
VERIFY_NOT_REACHED();
case 3:
active_low = true;
break;
}
bool trigger_level_mode = false;
// See ACPI spec Version 6.2, page 205 to learn more about Interrupt Overriding Flags.
switch (((redirection_override.flags() >> 2) & 0b11)) {
case 0:
trigger_level_mode = false;
break;
case 1:
trigger_level_mode = false;
break;
case 2:
VERIFY_NOT_REACHED();
case 3:
trigger_level_mode = true;
break;
}
configure_redirection_entry(redirection_override.gsi() - gsi_base(), InterruptManagement::acquire_mapped_interrupt_number(redirection_override.source()) + IRQ_VECTOR_BASE, 0, false, active_low, trigger_level_mode, true, 0);
}
}
void IOAPIC::reset_all_redirection_entries() const
{
InterruptDisabler disabler;
for (size_t index = 0; index < m_redirection_entries_count; index++)
reset_redirection_entry(index);
}
// Permanently disables this controller: clears every redirection entry
// and then flags the controller as hard-disabled via the base class.
void IOAPIC::hard_disable()
{
    InterruptDisabler disabler;
    reset_all_redirection_entries();
    IRQController::hard_disable();
}
// Resets one redirection entry: vector 0, normal delivery, physical
// destination, active high, edge triggered, masked.
void IOAPIC::reset_redirection_entry(int index) const
{
    InterruptDisabler disabler;
    configure_redirection_entry(index, 0, 0, false, false, false, true, 0);
}
// Programs redirection entry `index`, which occupies two consecutive
// 32-bit I/O APIC registers starting at IOAPIC_REDIRECTION_ENTRY_OFFSET.
// Low dword: vector, delivery mode (bits 8-10), destination mode (11),
// polarity (13), trigger mode (15), mask (16). High dword: destination
// APIC ID in bits 24-31.
void IOAPIC::configure_redirection_entry(int index, u8 interrupt_vector, u8 delivery_mode, bool logical_destination, bool active_low, bool trigger_level_mode, bool masked, u8 destination) const
{
    InterruptDisabler disabler;
    VERIFY((u32)index < m_redirection_entries_count);
    u32 redirection_entry1 = interrupt_vector | (delivery_mode & 0b111) << 8 | logical_destination << 11 | active_low << 13 | trigger_level_mode << 15 | masked << 16;
    u32 redirection_entry2 = destination << 24;
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry1);
    if constexpr (IOAPIC_DEBUG)
        dbgln("IOAPIC Value: {:#x}", read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET));
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET + 1, redirection_entry2);
    if constexpr (IOAPIC_DEBUG)
        // Was the magic literal 0x11; spell it the same way as the write above.
        dbgln("IOAPIC Value: {:#x}", read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET + 1));
}
void IOAPIC::mask_all_redirection_entries() const
{
InterruptDisabler disabler;
for (size_t index = 0; index < m_redirection_entries_count; index++)
mask_redirection_entry(index);
}
// Sets the mask bit (16) of one redirection entry; skips the register
// write when the entry is already masked.
void IOAPIC::mask_redirection_entry(u8 index) const
{
    VERIFY((u32)index < m_redirection_entries_count);
    u32 redirection_entry = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
    if (redirection_entry & (1 << 16))
        return;
    write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry | (1 << 16));
}
// Returns whether the mask bit (16) of the given redirection entry is set.
bool IOAPIC::is_redirection_entry_masked(u8 index) const
{
    VERIFY((u32)index < m_redirection_entries_count);
    return (read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET) & (1 << 16)) != 0;
}
// Clears the mask bit (bit 16 of the entry's low dword) for one redirection
// entry, using a read-modify-write of the IOAPIC register.
void IOAPIC::unmask_redirection_entry(u8 index) const
{
    VERIFY((u32)index < m_redirection_entries_count);
    constexpr u32 masked_bit = 1 << 16;
    auto const register_index = (index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET;
    u32 entry = read_register(register_index);
    if ((entry & masked_bit) == 0)
        return; // Already unmasked; avoid a redundant MMIO write.
    write_register(register_index, entry & ~masked_bit);
}
// NOTE(review): This returns is_redirection_entry_masked(), i.e. it reports
// true when the entry is *masked* (delivery disabled), which looks inverted
// relative to the method name. Also, the vector is used directly as a table
// index with no vector-to-entry translation. Confirm against callers before
// changing either behavior — they may compensate for it.
bool IOAPIC::is_vector_enabled(u8 interrupt_vector) const
{
    InterruptDisabler disabler;
    return is_redirection_entry_masked(interrupt_vector);
}
// Returns the interrupt vector (bits 0-7 of the low dword) currently
// programmed into the redirection entry at `index`.
u8 IOAPIC::read_redirection_entry_vector(u8 index) const
{
    VERIFY((u32)index < m_redirection_entries_count);
    auto const low_dword = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
    return static_cast<u8>(low_dword & 0xFF);
}
// Searches the redirection table for the entry whose programmed vector
// matches the (mapped) requested vector. Returns the entry index, or an
// empty Optional when no entry carries that vector.
// Fix: the target vector is loop-invariant, so compute it once instead of
// re-querying InterruptManagement on every iteration.
Optional<int> IOAPIC::find_redirection_entry_by_vector(u8 vector) const
{
    InterruptDisabler disabler;
    auto const target_vector = InterruptManagement::acquire_mapped_interrupt_number(vector) + IRQ_VECTOR_BASE;
    for (size_t index = 0; index < m_redirection_entries_count; index++) {
        if (read_redirection_entry_vector(index) == target_vector)
            return index;
    }
    return {};
}
// Masks the redirection entry that delivers this handler's interrupt. If no
// entry currently carries the vector, the interrupt is first mapped into the
// redirection table (via map_interrupt_redirection) and then looked up again.
void IOAPIC::disable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 interrupt_vector = handler.interrupt_number();
    VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
    auto found_index = find_redirection_entry_by_vector(interrupt_vector);
    if (!found_index.has_value()) {
        // Entry was never programmed; create the mapping, then re-search.
        map_interrupt_redirection(interrupt_vector);
        found_index = find_redirection_entry_by_vector(interrupt_vector);
    }
    VERIFY(found_index.has_value());
    mask_redirection_entry(found_index.value());
}
// Unmasks the redirection entry that delivers this handler's interrupt,
// mapping the interrupt into the redirection table first if no entry is
// programmed with that vector yet.
void IOAPIC::enable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 interrupt_vector = handler.interrupt_number();
    VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
    auto found_index = find_redirection_entry_by_vector(interrupt_vector);
    if (!found_index.has_value()) {
        // Entry was never programmed; create the mapping, then re-search.
        map_interrupt_redirection(interrupt_vector);
        found_index = find_redirection_entry_by_vector(interrupt_vector);
    }
    VERIFY(found_index.has_value());
    unmask_redirection_entry(found_index.value());
}
// End-of-interrupt: with the IOAPIC in use, the EOI is sent to the local
// APIC, not to the IOAPIC itself. Spurious interrupts must never take this
// path (they are handled by spurious_eoi()).
void IOAPIC::eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    VERIFY(handler.type() != HandlerType::SpuriousInterruptHandler);
    APIC::the().eoi();
}
// The IOAPIC has no In-Service Register equivalent to the 8259's; calling
// this is a programming error.
u16 IOAPIC::get_isr() const
{
    InterruptDisabler disabler;
    VERIFY_NOT_REACHED();
}
// The IOAPIC has no Interrupt-Request Register equivalent to the 8259's;
// calling this is a programming error.
u16 IOAPIC::get_irr() const
{
    InterruptDisabler disabler;
    VERIFY_NOT_REACHED();
}
// Indirect MMIO write: selects the register index via the select (IOREGSEL)
// register, then writes the value through the window (IOWIN) register. The
// select store must happen before the window store.
void IOAPIC::write_register(u32 index, u32 value) const
{
    InterruptDisabler disabler;
    m_regs->select = index;
    m_regs->window = value;
    dbgln_if(IOAPIC_DEBUG, "IOAPIC Writing, Value {:#x} @ offset {:#x}", (u32)m_regs->window, (u32)m_regs->select);
}
// Indirect MMIO read: selects the register index via the select (IOREGSEL)
// register, then reads the value through the window (IOWIN) register. The
// select store must happen before the window load.
u32 IOAPIC::read_register(u32 index) const
{
    InterruptDisabler disabler;
    m_regs->select = index;
    dbgln_if(IOAPIC_DEBUG, "IOAPIC Reading, Value {:#x} @ offset {:#x}", (u32)m_regs->window, (u32)m_regs->select);
    return m_regs->window;
}
}

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/x86/IRQController.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
// Memory-mapped register window of the IOAPIC: a store to `select` chooses
// a register index, after which `window` reads/writes access that register.
// The three reserved dwords pad `window` out to offset 0x10.
struct [[gnu::packed]] ioapic_mmio_regs {
    volatile u32 select;
    u32 reserved[3];
    volatile u32 window;
};
// Describes how a PCI device's interrupt line is routed to an IOAPIC input
// pin, together with the line's polarity and trigger mode.
class PCIInterruptOverrideMetadata {
public:
    // NOTE(review): `source_irq` appears to feed both the PCI interrupt pin
    // and the device number (see the two accessors below, which have no
    // matching constructor parameters) — confirm in the constructor definition.
    PCIInterruptOverrideMetadata(u8 bus_id, u8 polarity, u8 trigger_mode, u8 source_irq, u32 ioapic_id, u16 ioapic_int_pin);
    u8 bus() const { return m_bus_id; }
    u8 polarity() const { return m_polarity; }
    u8 trigger_mode() const { return m_trigger_mode; }
    u8 pci_interrupt_pin() const { return m_pci_interrupt_pin; }
    u8 pci_device_number() const { return m_pci_device_number; }
    u32 ioapic_id() const { return m_ioapic_id; }
    u16 ioapic_interrupt_pin() const { return m_ioapic_interrupt_pin; }

private:
    const u8 m_bus_id;
    const u8 m_polarity;
    const u8 m_trigger_mode;
    const u8 m_pci_interrupt_pin;
    const u8 m_pci_device_number;
    const u32 m_ioapic_id;
    const u16 m_ioapic_interrupt_pin;
};
// Driver for the 82093AA-compatible I/O APIC. Each instance covers one
// IOAPIC chip, handling the contiguous range of global system interrupts
// starting at `gsi_base` via its redirection table.
// Fix: removed the stray semicolon after the inline body of model().
class IOAPIC final : public IRQController {
public:
    IOAPIC(PhysicalAddress, u32 gsi_base);
    virtual void enable(GenericInterruptHandler const&) override;
    virtual void disable(GenericInterruptHandler const&) override;
    virtual void hard_disable() override;
    virtual void eoi(GenericInterruptHandler const&) const override;
    virtual void spurious_eoi(GenericInterruptHandler const&) const override;
    virtual bool is_vector_enabled(u8 number) const override;
    virtual bool is_enabled() const override;
    virtual u16 get_isr() const override;
    virtual u16 get_irr() const override;
    virtual u32 gsi_base() const override { return m_gsi_base; }
    virtual size_t interrupt_vectors_count() const override { return m_redirection_entries_count; }
    virtual StringView model() const override { return "IOAPIC"sv; }
    virtual IRQControllerType type() const override { return IRQControllerType::i82093AA; }

private:
    // Redirection-table manipulation. Entries are 64-bit, accessed as pairs
    // of 32-bit registers through write_register()/read_register().
    void configure_redirection_entry(int index, u8 interrupt_vector, u8 delivery_mode, bool logical_destination, bool active_low, bool trigger_level_mode, bool masked, u8 destination) const;
    void reset_redirection_entry(int index) const;
    void map_interrupt_redirection(u8 interrupt_vector);
    void reset_all_redirection_entries() const;
    void mask_all_redirection_entries() const;
    void mask_redirection_entry(u8 index) const;
    void unmask_redirection_entry(u8 index) const;
    bool is_redirection_entry_masked(u8 index) const;
    u8 read_redirection_entry_vector(u8 index) const;
    Optional<int> find_redirection_entry_by_vector(u8 vector) const;
    void configure_redirections() const;

    // Indirect register access through the select/window MMIO pair.
    void write_register(u32 index, u32 value) const;
    u32 read_register(u32 index) const;

    virtual void initialize() override;
    void map_isa_interrupts();
    void map_pci_interrupts();
    void isa_identity_map(int index);

    PhysicalAddress m_address;
    mutable Memory::TypedMapping<ioapic_mmio_regs> m_regs;
    u32 m_gsi_base;     // First global system interrupt handled by this chip.
    u8 m_id;            // IOAPIC ID as reported by the chip.
    u8 m_version;       // IOAPIC version register value.
    size_t m_redirection_entries_count;
};
}

View file

@ -0,0 +1,230 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/InterruptDisabler.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Arch/x86/common/Interrupts/PIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h>
#include <Kernel/Sections.h>
namespace Kernel {
// The slave 8259 is connected to the master's IRQ2 line.
// This is really only to enhance clarity.
#define SLAVE_INDEX 2

// I/O port addresses of the two 8259s. Despite the names, *_CTL (0x20/0xA0)
// is the command port and *_CMD (0x21/0xA1) is the data/IMR port.
#define PIC0_CTL 0x20
#define PIC0_CMD 0x21
#define PIC1_CTL 0xA0
#define PIC1_CMD 0xA1

// ICW1/ICW4 initialization command word bits (see the 8259A datasheet).
#define ICW1_ICW4 0x01      // ICW4 (not) needed
#define ICW1_SINGLE 0x02    // Single (cascade) mode
#define ICW1_INTERVAL4 0x04 // Call address interval 4 (8)
#define ICW1_LEVEL 0x08     // Level triggered (edge) mode
#define ICW1_INIT 0x10      // Initialization - required
#define ICW4_8086 0x01       // 8086/88 (MCS-80/85) mode
#define ICW4_AUTO 0x02       // Auto (normal) EOI
#define ICW4_BUF_SLAVE 0x08  // Buffered mode/slave
#define ICW4_BUF_MASTER 0x0C // Buffered mode/master
#define ICW4_SFNM 0x10       // Special fully nested (not)
// Returns true when every IRQ line on both 8259s (16 bits total) is masked.
static inline bool is_all_masked(u16 reg)
{
    return reg == 0xFFFF;
}
// The PIC counts as enabled while at least one IRQ line is unmasked and the
// controller itself has not been hard-disabled (e.g. in favor of the APIC).
bool PIC::is_enabled() const
{
    return !is_all_masked(m_cached_irq_mask) && !is_hard_disabled();
}
// Masks the IRQ line belonging to this handler on whichever 8259 owns it
// (IRQs 0-7 on the master, 8-15 on the slave) and records the new state in
// the cached mask.
void PIC::disable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    u8 const irq = handler.interrupt_number();
    if ((m_cached_irq_mask & (1 << irq)) != 0)
        return; // Already masked.
    bool const on_slave = (irq & 8) != 0;
    u8 const port = on_slave ? PIC1_CMD : PIC0_CMD;
    u8 const imr = IO::in8(port) | (1 << (irq & 7));
    IO::out8(port, imr);
    m_cached_irq_mask |= 1 << irq;
}
// Constructing the PIC immediately programs both 8259s (see initialize()).
UNMAP_AFTER_INIT PIC::PIC()
{
    initialize();
}
// Handles EOI for a spurious interrupt. A spurious IRQ7 (master) needs no
// EOI at all. A spurious IRQ15 (slave) still needs an EOI sent to the
// master for the cascade line (IRQ2), since the master doesn't know the
// slave's interrupt was spurious.
void PIC::spurious_eoi(GenericInterruptHandler const& handler) const
{
    VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
    if (handler.interrupt_number() == 7)
        return;
    if (handler.interrupt_number() == 15) {
        IO::in8(PIC1_CMD); /* dummy read */
        IO::out8(PIC0_CTL, 0x60 | (2)); // Specific EOI for the cascade line on the master.
    }
}
// NOTE(review): This returns true when the IRQ's bit is *set* in the cached
// mask register — i.e. when the line is masked (disabled). That appears
// inverted relative to the method name; verify callers before changing the
// polarity, as they may compensate for it.
bool PIC::is_vector_enabled(u8 irq) const
{
    return m_cached_irq_mask & (1 << irq);
}
// Unmasks the IRQ line belonging to this handler after validating that the
// number falls inside this controller's range (0-15).
void PIC::enable(GenericInterruptHandler const& handler)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
    enable_vector(handler.interrupt_number());
}
// Unmasks the given IRQ line on whichever 8259 owns it (IRQs 0-7 on the
// master, 8-15 on the slave) and records the new state in the cached mask.
void PIC::enable_vector(u8 irq)
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    if ((m_cached_irq_mask & (1 << irq)) == 0)
        return; // Already enabled.
    bool const on_slave = (irq & 8) != 0;
    u8 const port = on_slave ? PIC1_CMD : PIC0_CMD;
    u8 const imr = IO::in8(port) & ~(1 << (irq & 7));
    IO::out8(port, imr);
    m_cached_irq_mask &= ~(1 << irq);
}
// Acknowledges an interrupt. If the line is masked in our cached IMR, the
// interrupt we got must have been spurious, so it is routed to
// spurious_eoi() instead of getting a real EOI.
void PIC::eoi(GenericInterruptHandler const& handler) const
{
    InterruptDisabler disabler;
    VERIFY(!is_hard_disabled());
    u8 irq = handler.interrupt_number();
    VERIFY(irq >= gsi_base() && irq < interrupt_vectors_count());
    if ((1 << irq) & m_cached_irq_mask) {
        spurious_eoi(handler);
        return;
    }
    eoi_interrupt(irq);
}
// Sends a specific EOI (OCW2: 0x60 | level) for the given IRQ. Slave IRQs
// (8-15) need two EOIs: one to the slave for the line itself and one to the
// master for the cascade line (IRQ2).
void PIC::eoi_interrupt(u8 irq) const
{
    if (irq & 8) {
        IO::in8(PIC1_CMD); /* dummy read */
        IO::out8(PIC1_CTL, 0x60 | (irq & 7));
        IO::out8(PIC0_CTL, 0x60 | (2));
        return;
    }
    IO::in8(PIC0_CMD); /* dummy read */
    IO::out8(PIC0_CTL, 0x60 | irq);
}
// Sends a non-specific EOI (0x20) to both PICs, slave first.
void PIC::complete_eoi() const
{
    IO::out8(PIC1_CTL, 0x20);
    IO::out8(PIC0_CTL, 0x20);
}
// Permanently parks the PIC (used when switching over to the APIC): remaps
// its vectors to the pic_disabled_vector_base range, masks every line on
// both chips, records the fully-masked state in the cache, and then flags
// the controller hard-disabled in the base class.
void PIC::hard_disable()
{
    InterruptDisabler disabler;
    remap(pic_disabled_vector_base);
    IO::out8(PIC0_CMD, 0xff);
    IO::out8(PIC1_CMD, 0xff);
    m_cached_irq_mask = 0xffff;
    IRQController::hard_disable();
}
// Runs the full 8259 initialization sequence (ICW1-ICW4) on both chips,
// placing the master's vectors at `offset` and the slave's at `offset + 8`,
// then masks everything except the cascade line. The ICW writes must occur
// in exactly this order — the chips interpret successive data-port writes
// as ICW2, ICW3, ICW4 after seeing ICW1.
void PIC::remap(u8 offset)
{
    /* ICW1 (edge triggered mode, cascading controllers, expect ICW4) */
    IO::out8(PIC0_CTL, ICW1_INIT | ICW1_ICW4);
    IO::out8(PIC1_CTL, ICW1_INIT | ICW1_ICW4);
    /* ICW2 (upper 5 bits specify ISR indices, lower 3 don't specify anything) */
    IO::out8(PIC0_CMD, offset);
    IO::out8(PIC1_CMD, offset + 0x08);
    /* ICW3 (configure master/slave relationship) */
    IO::out8(PIC0_CMD, 1 << SLAVE_INDEX);
    IO::out8(PIC1_CMD, SLAVE_INDEX);
    /* ICW4 (set x86 mode) */
    IO::out8(PIC0_CMD, ICW4_8086);
    IO::out8(PIC1_CMD, ICW4_8086);
    // Mask -- start out with all IRQs disabled.
    IO::out8(PIC0_CMD, 0xff);
    IO::out8(PIC1_CMD, 0xff);
    m_cached_irq_mask = 0xffff;
    // ...except IRQ2, since that's needed for the master to let through slave interrupts.
    enable_vector(2);
}
// Programs both 8259s for cascaded operation with vectors starting at
// IRQ_VECTOR_BASE.
// Fix: the body duplicated remap()'s entire ICW1-ICW4 sequence line for
// line; delegate to remap() instead. This is behaviorally equivalent — the
// only extra effect is remap() re-setting m_cached_irq_mask to 0xffff,
// which is already its constructor-initialized value when initialize() runs.
UNMAP_AFTER_INIT void PIC::initialize()
{
    remap(IRQ_VECTOR_BASE);
    dmesgln("PIC: Cascading mode, vectors {:#02x}-{:#02x}", IRQ_VECTOR_BASE, IRQ_VECTOR_BASE + 0xf);
}
// Reads the combined In-Service Register: OCW3 command 0x0b selects the ISR
// on each chip, after which reading the command port returns it. Slave byte
// in bits 8-15, master byte in bits 0-7.
u16 PIC::get_isr() const
{
    IO::out8(PIC0_CTL, 0x0b);
    IO::out8(PIC1_CTL, 0x0b);
    u8 isr0 = IO::in8(PIC0_CTL);
    u8 isr1 = IO::in8(PIC1_CTL);
    return (isr1 << 8) | isr0;
}
// Reads the combined Interrupt-Request Register: OCW3 command 0x0a selects
// the IRR on each chip, after which reading the command port returns it.
// Slave byte in bits 8-15, master byte in bits 0-7.
u16 PIC::get_irr() const
{
    IO::out8(PIC0_CTL, 0x0a);
    IO::out8(PIC1_CTL, 0x0a);
    u8 irr0 = IO::in8(PIC0_CTL);
    u8 irr1 = IO::in8(PIC1_CTL);
    return (irr1 << 8) | irr0;
}
}

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Arch/x86/IRQController.h>
namespace Kernel {
// Vector range the PIC is remapped to when hard-disabled (see
// PIC::hard_disable), keeping any stale interrupts out of the live range.
static constexpr size_t pic_disabled_vector_base = 0x20;
static constexpr size_t pic_disabled_vector_end = 0x2f;
// Driver for the classic dual-8259 programmable interrupt controller pair
// (master + slave, cascaded via IRQ2), covering IRQs 0-15.
class PIC final : public IRQController {
public:
    PIC();
    virtual void enable(GenericInterruptHandler const&) override;
    virtual void disable(GenericInterruptHandler const&) override;
    virtual void hard_disable() override;
    virtual void eoi(GenericInterruptHandler const&) const override;
    virtual bool is_vector_enabled(u8 number) const override;
    virtual bool is_enabled() const override;
    virtual void spurious_eoi(GenericInterruptHandler const&) const override;
    virtual u16 get_isr() const override;
    virtual u16 get_irr() const override;
    virtual u32 gsi_base() const override { return 0; }
    virtual size_t interrupt_vectors_count() const override { return 16; }
    virtual StringView model() const override { return "Dual i8259"sv; }
    virtual IRQControllerType type() const override { return IRQControllerType::i8259; }

private:
    // Software copy of both chips' interrupt mask registers (bit set = masked).
    u16 m_cached_irq_mask { 0xffff };
    void eoi_interrupt(u8 irq) const;
    void enable_vector(u8 number);
    void remap(u8 offset);
    void complete_eoi() const;
    virtual void initialize() override;
};
}

View file

@ -12,7 +12,7 @@
#include <AK/StringBuilder.h>
#include <AK/Types.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Arch/x86/common/Interrupts/APIC.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>