
Kernel: Convert Processor::in_irq() to static current_in_irq()

This closes the race window between Processor::current() and a context
switch happening before in_irq().
Author: Andreas Kling
Date:   2021-08-22 12:21:31 +02:00
commit d60635cb9d
parent 3e3f760808
14 changed files with 37 additions and 39 deletions
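The race being closed: the old accessor required Processor::current() to resolve this CPU's Processor object first, and only afterwards read m_in_irq through the returned reference. If the scheduler preempts the thread between those two steps and resumes it on another CPU, the read (and, since the old accessor returned a u32&, potentially even a write) hits the previous processor's field. A single gs-relative load has no such window. A simplified sketch of the two patterns (illustrative variable names, not kernel code):

    // Before: two steps with a migration window between them.
    Processor& proc = Processor::current(); // 1) resolve this CPU's Processor
    //  <-- a context switch here can resume us on a different CPU
    u32 depth = proc.in_irq();              // 2) reads the old CPU's m_in_irq

    // After: one gs-relative load, no intermediate reference to go stale.
    FlatPtr depth2 = Processor::current_in_irq();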

@@ -120,7 +120,7 @@ class Processor {
     u32 m_gdt_length;
     u32 m_cpu;
-    u32 m_in_irq;
+    FlatPtr m_in_irq {};
     volatile u32 m_in_critical {};
     static Atomic<u32> s_idle_cpu_mask;
@@ -329,9 +329,9 @@ public:
         return Processor::id() == 0;
     }
 
-    ALWAYS_INLINE u32& in_irq()
+    ALWAYS_INLINE static FlatPtr current_in_irq()
     {
-        return m_in_irq;
+        return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
     }
 
     ALWAYS_INLINE static void restore_in_critical(u32 critical)
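current_in_irq() compiles down to a single load from the gs segment: each CPU's gs base points at its own Processor object, so read_gs_ptr(__builtin_offsetof(Processor, m_in_irq)) reads gs:[offset of m_in_irq] in one instruction. A sketch of what such a gs-relative reader looks like (the kernel's actual read_gs_ptr may differ in detail):

    ALWAYS_INLINE FlatPtr read_gs_ptr(FlatPtr offset)
    {
        FlatPtr value;
        // One gs-relative mov: no Processor* is materialized, so a context
        // switch cannot interleave between "find CPU" and "read field".
        asm volatile("mov %%gs:%a[off], %[val]"
                     : [val] "=r"(value)
                     : [off] "ir"(offset));
        return value;
    }

This is also why the first hunk widens m_in_irq from u32 to FlatPtr: the field becomes pointer-sized, matching what the gs-relative load returns.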

@@ -288,7 +288,7 @@ void page_fault_handler(TrapFrame* trap)
     bool faulted_in_kernel = !(regs.cs & 3);
 
-    if (faulted_in_kernel && Processor::current().in_irq()) {
+    if (faulted_in_kernel && Processor::current_in_irq()) {
         // If we're faulting in an IRQ handler, first check if we failed
         // due to safe_memcpy, safe_strnlen, or safe_memset. If we did,
         // gracefully continue immediately. Because we're in an IRQ handler

@@ -180,7 +180,7 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
 void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
 {
-    VERIFY(!in_irq());
+    VERIFY(!m_in_irq);
     VERIFY(m_in_critical == 1);
     VERIFY(is_kernel_mode());

@@ -164,7 +164,7 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
 void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
 {
-    VERIFY(!in_irq());
+    VERIFY(!m_in_irq);
     VERIFY(m_in_critical == 1);
     VERIFY(is_kernel_mode());
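Note that both switch_context() variants (i386 and x86_64) switch the assertion to a direct m_in_irq read rather than the new static accessor: switch_context() is a non-static member that runs with m_in_critical == 1, i.e. on the processor it belongs to and with preemption excluded, so the plain member access has no migration window to close and skips the needless gs-relative indirection.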

@@ -135,7 +135,7 @@ void AsyncDeviceRequest::complete(RequestResult result)
         VERIFY(m_result == Started);
         m_result = result;
     }
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         ref(); // Make sure we don't get freed
         Processor::deferred_call_queue([this]() {
             request_finished();
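complete() defers the completion callback out of IRQ context, taking a ref() so the request stays alive until the deferred call runs. The full shape looks roughly like this; the unref() and the non-IRQ branch are assumptions, since the diff only shows the first lines:

    if (Processor::current_in_irq()) {
        ref(); // make sure we don't get freed before the deferred call runs
        Processor::deferred_call_queue([this]() {
            request_finished();
            unref(); // assumed: balances the ref() taken above
        });
    } else {
        request_finished();
    }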

@@ -132,7 +132,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
 bool I8042Controller::irq_process_input_buffer(HIDDevice::Type)
 {
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
     u8 status = IO::in8(I8042_STATUS);
     if (!(status & I8042_BUFFER_FULL))
@@ -167,7 +167,7 @@ bool I8042Controller::do_reset_device(HIDDevice::Type device)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     if (do_send_command(device, 0xff) != I8042_ACK)
         return false;
     // Wait until we get the self-test result
@@ -179,7 +179,7 @@ u8 I8042Controller::do_send_command(HIDDevice::Type device, u8 command)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     return do_write_to_device(device, command);
 }
@@ -189,7 +189,7 @@ u8 I8042Controller::do_send_command(HIDDevice::Type device, u8 command, u8 data)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     u8 response = do_write_to_device(device, command);
     if (response == I8042_ACK)
@@ -202,7 +202,7 @@ u8 I8042Controller::do_write_to_device(HIDDevice::Type device, u8 data)
     VERIFY(device != HIDDevice::Type::Unknown);
     VERIFY(m_lock.is_locked());
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     int attempts = 0;
     u8 response;

@@ -121,7 +121,7 @@ protected:
     void evaluate_block_conditions()
     {
-        if (Processor::current().in_irq()) {
+        if (Processor::current_in_irq()) {
             // If called from an IRQ handler we need to delay evaluation
             // and unblocking of waiting threads. Note that this File
             // instance may be deleted until the deferred call is executed!
@@ -137,7 +137,7 @@ protected:
 private:
     ALWAYS_INLINE void do_evaluate_block_conditions()
    {
-        VERIFY(!Processor::current().in_irq());
+        VERIFY(!Processor::current_in_irq());
        block_condition().unblock();
    }
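evaluate_block_conditions() uses the same defer-from-IRQ shape as AsyncDeviceRequest::complete() above, but with the lifetime caveat its comment spells out: nothing here pins the File across the deferral, so the instance may already be gone by the time the deferred call executes.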

@@ -17,7 +17,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(mode != Mode::Unlocked);
     auto current_thread = Thread::current();
@@ -143,7 +143,7 @@ void Mutex::unlock()
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     SpinlockLocker lock(m_lock);
     Mode current_mode = m_mode;
@@ -253,7 +253,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
 {
     // NOTE: This may be called from an interrupt handler (not an IRQ handler)
     // and also from within critical sections!
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     SpinlockLocker lock(m_lock);
     auto current_mode = m_mode;
@@ -316,7 +316,7 @@ void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] LockLocation const& location)
 {
     VERIFY(mode != Mode::Unlocked);
     VERIFY(lock_count > 0);
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     auto current_thread = Thread::current();
     bool did_block = false;
     SpinlockLocker lock(m_lock);
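Every Mutex path asserts !Processor::current_in_irq() because a mutex may block, and blocking inside a hardware IRQ handler would suspend whichever thread happened to be interrupted. The NOTE's distinction appears to be that "interrupt handler (not an IRQ handler)" covers synchronous trap and exception paths, which may legitimately take a mutex, while in_irq counts only hardware IRQ context.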

@@ -685,9 +685,9 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
 PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
-            Processor::id(), fault.code(), fault.vaddr(), Processor::current().in_irq());
+            Processor::id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
         dump_kernel_regions();
         return PageFaultResponse::ShouldCrash;
     }

@@ -17,7 +17,7 @@ void __sanitizer_cov_trace_pc(void)
     if (g_in_early_boot) [[unlikely]]
         return;
-    if (Processor::current().in_irq()) [[unlikely]] {
+    if (Processor::current_in_irq()) [[unlikely]] {
         // Do not trace in interrupts.
         return;
     }

@@ -252,16 +252,15 @@ bool Scheduler::pick_next()
 bool Scheduler::yield()
 {
     InterruptDisabler disabler;
-    auto& proc = Processor::current();
     auto current_thread = Thread::current();
-    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
+    dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
     VERIFY(current_thread != nullptr);
-    if (proc.in_irq() || Processor::in_critical()) {
+    if (Processor::current_in_irq() || Processor::in_critical()) {
         // If we're handling an IRQ we can't switch context, or we're in
         // a critical section where we don't want to switch contexts, then
         // delay until exiting the trap or critical section
-        proc.invoke_scheduler_async();
+        Processor::current().invoke_scheduler_async();
         return false;
     }
@@ -269,7 +268,7 @@ bool Scheduler::yield()
         return false;
     if constexpr (SCHEDULER_DEBUG)
-        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current().in_irq());
+        dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::id(), *current_thread, Processor::current_in_irq());
     return true;
 }
@@ -462,7 +461,7 @@ void Scheduler::add_time_scheduled(u64 time_to_add, bool is_kernel)
 void Scheduler::timer_tick(const RegisterState& regs)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
     auto current_thread = Processor::current_thread();
     if (!current_thread)
@@ -506,15 +505,14 @@ void Scheduler::timer_tick(const RegisterState& regs)
     }
     VERIFY_INTERRUPTS_DISABLED();
-    VERIFY(Processor::current().in_irq());
+    VERIFY(Processor::current_in_irq());
     Processor::current().invoke_scheduler_async();
 }
 
 void Scheduler::invoke_async()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    auto& processor = Processor::current();
-    VERIFY(!processor.in_irq());
+    VERIFY(!Processor::current_in_irq());
 
     // Since this function is called when leaving critical sections (such
     // as a Spinlock), we need to check if we're not already doing this
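Scheduler::yield() shows the exact pattern this commit exists to remove: the old code cached auto& proc = Processor::current() and then consulted proc.in_irq() several statements later, a stale reference waiting to happen. After the change all queries go through the static single-load accessors, and only invoke_scheduler_async(), which genuinely needs the instance, re-resolves Processor::current() at the call site.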

@@ -157,7 +157,7 @@ Thread::~Thread()
 void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock, u32 lock_count)
 {
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
     VERIFY(!Memory::s_mm_lock.own_lock());
@@ -238,7 +238,7 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
     SpinlockLocker scheduler_lock(g_scheduler_lock);
     SpinlockLocker block_lock(m_block_lock);
     VERIFY(m_blocking_lock == &lock);
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(g_scheduler_lock.own_lock());
     VERIFY(m_block_lock.own_lock());
     VERIFY(m_blocking_lock == &lock);
@@ -251,7 +251,7 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
         VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
         set_state(Thread::Runnable);
     };
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
             if (auto this_thread = self.strong_ref())
                 do_unblock();
@@ -272,7 +272,7 @@ void Thread::unblock_from_blocker(Blocker& blocker)
         if (!should_be_stopped() && !is_stopped())
             unblock();
     };
-    if (Processor::current().in_irq()) {
+    if (Processor::current_in_irq()) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
             if (auto this_thread = self.strong_ref())
                 do_unblock();
@@ -284,7 +284,7 @@ void Thread::unblock_from_blocker(Blocker& blocker)
 void Thread::unblock(u8 signal)
 {
-    VERIFY(!Processor::current().in_irq());
+    VERIFY(!Processor::current_in_irq());
     VERIFY(g_scheduler_lock.own_lock());
     VERIFY(m_block_lock.own_lock());
     if (m_state != Thread::Blocked)
@@ -377,7 +377,7 @@ void Thread::die_if_needed()
     // Now leave the critical section so that we can also trigger the
     // actual context switch
     Processor::clear_critical();
-    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
+    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current_in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
     VERIFY_NOT_REACHED();
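In unblock_from_lock() and unblock_from_blocker() the deferred call additionally guards against the Thread dying before the deferred work runs: the lambda captures a weak pointer and only unblocks if it can still take a strong reference. The shape, as it appears in the diff:

    if (Processor::current_in_irq()) {
        // Defer out of IRQ context; the weak pointer keeps this safe even
        // if the Thread is destroyed before the deferred call executes.
        Processor::deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    }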

@@ -847,7 +847,7 @@ public:
     template<typename BlockerType, class... Args>
     [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
     {
-        VERIFY(!Processor::current().in_irq());
+        VERIFY(!Processor::current_in_irq());
         VERIFY(this == Thread::current());
         ScopedCritical critical;
         VERIFY(!Memory::s_mm_lock.own_lock());
@@ -889,7 +889,7 @@ public:
         // Process::kill_all_threads may be called at any time, which will mark all
         // threads to die. In that case
         timer_was_added = TimerQueue::the().add_timer_without_id(*m_block_timer, block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
-            VERIFY(!Processor::current().in_irq());
+            VERIFY(!Processor::current_in_irq());
             VERIFY(!g_scheduler_lock.own_lock());
             VERIFY(!m_block_lock.own_lock());
             // NOTE: this may execute on the same or any other processor!

@@ -403,7 +403,7 @@ void TimeManagement::increment_time_since_boot()
 void TimeManagement::system_timer_tick(const RegisterState& regs)
 {
-    if (Processor::current().in_irq() <= 1) {
+    if (Processor::current_in_irq() <= 1) {
         // Don't expire timers while handling IRQs
         TimerQueue::the().fire();
     }
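system_timer_tick() compares the value against 1 rather than testing it as a boolean: m_in_irq is effectively an IRQ nesting depth, so it is 1 inside the timer interrupt itself and greater than 1 when the tick has preempted another IRQ handler, in which case timer expiry is postponed, per the comment.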