From 1462211ccf5b0d823bd0b518a7d7ac0e1d31ea1c Mon Sep 17 00:00:00 2001
From: Liav A
Date: Sun, 6 Mar 2022 22:07:04 +0200
Subject: [PATCH] Kernel: Allow propagation of WorkQueue item allocation failures

WorkQueue::queue() becomes WorkQueue::try_queue(), which returns
ErrorOr<void> so callers can observe and handle a failed work item
allocation.

In most cases it is safe to abort the requested operation and move on.
However, in some places it is not yet clear how to handle these
failures, so we use the MUST() wrapper to force a kernel panic for now.
---
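Illustrative only, not part of the diff below: these are roughly the two
call-site patterns this patch introduces (the lambda body,
do_deferred_work(), is a placeholder):

    // No sensible fallback yet: panic if the work item cannot be allocated.
    MUST(g_io_work->try_queue([this]() {
        do_deferred_work();
    }));

    // The storage drivers instead fail the in-flight request with the new
    // OutOfMemory result and bail out of the handler.
    auto work_item_creation_result = g_io_work->try_queue([this]() {
        do_deferred_work();
    });
    if (work_item_creation_result.is_error()) {
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
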
 Kernel/Bus/VirtIO/Console.cpp              |  5 ++--
 Kernel/Devices/AsyncDeviceRequest.h        |  1 +
 Kernel/Devices/HID/PS2KeyboardDevice.cpp   |  5 ++--
 Kernel/Graphics/VirtIOGPU/Console.cpp      |  5 ++--
 Kernel/Storage/ATA/AHCIPort.cpp            | 35 ++++++++++++++++++----
 Kernel/Storage/ATA/BMIDEChannel.cpp        |  7 ++++-
 Kernel/Storage/ATA/IDEChannel.cpp          | 14 +++++++--
 Kernel/Storage/NVMe/NVMeInterruptQueue.cpp |  7 ++++-
 Kernel/WorkQueue.h                         | 15 +++++++---
 9 files changed, 75 insertions(+), 19 deletions(-)

diff --git a/Kernel/Bus/VirtIO/Console.cpp b/Kernel/Bus/VirtIO/Console.cpp
index 6aa6bde2bc..cc20d43bef 100644
--- a/Kernel/Bus/VirtIO/Console.cpp
+++ b/Kernel/Bus/VirtIO/Console.cpp
@@ -151,7 +151,8 @@ void Console::process_control_message(ControlMessage message)
 {
     switch (message.event) {
     case (u16)ControlEvent::DeviceAdd: {
-        g_io_work->queue([message, this]() -> void {
+        // FIXME: Do something sanely here if we can't allocate a work queue?
+        MUST(g_io_work->try_queue([message, this]() -> void {
             u32 id = message.id;
             if (id >= m_ports.size()) {
                 dbgln("Device provided an invalid port number {}. max_nr_ports: {}", id, m_ports.size());
@@ -172,7 +173,7 @@ void Console::process_control_message(ControlMessage message)
                 .value = (u16)ControlMessage::Status::Success
             };
             write_control_message(ready_event);
-        });
+        }));
         break;
     }

diff --git a/Kernel/Devices/AsyncDeviceRequest.h b/Kernel/Devices/AsyncDeviceRequest.h
index 4aa7c2ff89..bc7376e55d 100644
--- a/Kernel/Devices/AsyncDeviceRequest.h
+++ b/Kernel/Devices/AsyncDeviceRequest.h
@@ -31,6 +31,7 @@ public:
         Success,
         Failure,
         MemoryFault,
+        OutOfMemory,
         Cancelled
     };

diff --git a/Kernel/Devices/HID/PS2KeyboardDevice.cpp b/Kernel/Devices/HID/PS2KeyboardDevice.cpp
index 8dcd080e21..cbad9a434f 100644
--- a/Kernel/Devices/HID/PS2KeyboardDevice.cpp
+++ b/Kernel/Devices/HID/PS2KeyboardDevice.cpp
@@ -71,9 +71,10 @@ void PS2KeyboardDevice::irq_handle_byte_read(u8 byte)
         break;
     default:
         if ((m_modifiers & Mod_Alt) != 0 && ch >= 2 && ch <= ConsoleManagement::s_max_virtual_consoles + 1) {
-            g_io_work->queue([ch]() {
+            // FIXME: Do something sanely here if we can't allocate a work queue?
+            MUST(g_io_work->try_queue([ch]() {
                 ConsoleManagement::the().switch_to(ch - 0x02);
-            });
+            }));
         }
         key_state_changed(ch, pressed);
     }
diff --git a/Kernel/Graphics/VirtIOGPU/Console.cpp b/Kernel/Graphics/VirtIOGPU/Console.cpp
index 38d7a63a83..15f97206d1 100644
--- a/Kernel/Graphics/VirtIOGPU/Console.cpp
+++ b/Kernel/Graphics/VirtIOGPU/Console.cpp
@@ -65,10 +65,11 @@ void Console::enqueue_refresh_timer()
                 .width = (u32)rect.width(),
                 .height = (u32)rect.height(),
             };
-            g_io_work->queue([this, dirty_rect]() {
+            // FIXME: Do something sanely here if we can't allocate a work queue?
+            MUST(g_io_work->try_queue([this, dirty_rect]() {
                 m_framebuffer_device->flush_dirty_window(dirty_rect, m_framebuffer_device->current_buffer());
                 m_dirty_rect.clear();
-            });
+            }));
         }
         enqueue_refresh_timer();
     });
diff --git a/Kernel/Storage/ATA/AHCIPort.cpp b/Kernel/Storage/ATA/AHCIPort.cpp
index 24df2a7383..f72d307cac 100644
--- a/Kernel/Storage/ATA/AHCIPort.cpp
+++ b/Kernel/Storage/ATA/AHCIPort.cpp
@@ -70,13 +70,23 @@ void AHCIPort::handle_interrupt()
     if ((m_port_registers.ssts & 0xf) != 3 && m_connected_device) {
         m_connected_device->prepare_for_unplug();
         StorageManagement::the().remove_device(*m_connected_device);
-        g_io_work->queue([this]() {
+        auto work_item_creation_result = g_io_work->try_queue([this]() {
             m_connected_device.clear();
         });
+        if (work_item_creation_result.is_error()) {
+            auto current_request = m_current_request;
+            m_current_request.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+        }
     } else {
-        g_io_work->queue([this]() {
+        auto work_item_creation_result = g_io_work->try_queue([this]() {
             reset();
         });
+        if (work_item_creation_result.is_error()) {
+            auto current_request = m_current_request;
+            m_current_request.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+        }
     }
     return;
 }
@@ -86,15 +96,25 @@ void AHCIPort::handle_interrupt()
     if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::INF)) {
         // We need to defer the reset, because we can receive interrupts when
         // resetting the device.
-        g_io_work->queue([this]() {
+        auto work_item_creation_result = g_io_work->try_queue([this]() {
             reset();
         });
+        if (work_item_creation_result.is_error()) {
+            auto current_request = m_current_request;
+            m_current_request.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+        }
         return;
     }
     if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::IF) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::TFE) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBD) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBF)) {
-        g_io_work->queue([this]() {
+        auto work_item_creation_result = g_io_work->try_queue([this]() {
             recover_from_fatal_error();
         });
+        if (work_item_creation_result.is_error()) {
+            auto current_request = m_current_request;
+            m_current_request.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+        }
         return;
     }
     if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::DHR) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::PS)) {
@@ -106,7 +126,7 @@ void AHCIPort::handle_interrupt()
         if (!m_current_request) {
             dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled, probably identify request", representative_port_index());
         } else {
-            g_io_work->queue([this]() {
+            auto work_item_creation_result = g_io_work->try_queue([this]() {
                 dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled", representative_port_index());
                 MutexLocker locker(m_lock);
                 VERIFY(m_current_request);
@@ -128,6 +148,11 @@ void AHCIPort::handle_interrupt()
                 dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request success", representative_port_index());
                 complete_current_request(AsyncDeviceRequest::Success);
             });
+            if (work_item_creation_result.is_error()) {
+                auto current_request = m_current_request;
+                m_current_request.clear();
+                current_request->complete(AsyncDeviceRequest::OutOfMemory);
+            }
         }
     }

diff --git a/Kernel/Storage/ATA/BMIDEChannel.cpp b/Kernel/Storage/ATA/BMIDEChannel.cpp
index 2e6e40ce22..e938669a1a 100644
--- a/Kernel/Storage/ATA/BMIDEChannel.cpp
+++ b/Kernel/Storage/ATA/BMIDEChannel.cpp
@@ -121,7 +121,7 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
     // This is important so that we can safely write the buffer back,
     // which could cause page faults. Note that this may be called immediately
     // before Processor::deferred_call_queue returns!
-    g_io_work->queue([this, result]() {
+    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
         dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
         SpinlockLocker lock(m_request_lock);
         VERIFY(m_current_request);
@@ -145,6 +145,11 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
         lock.unlock();
         current_request->complete(result);
     });
+    if (work_item_creation_result.is_error()) {
+        auto current_request = m_current_request;
+        m_current_request.clear();
+        current_request->complete(AsyncDeviceRequest::OutOfMemory);
+    }
 }

 void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
diff --git a/Kernel/Storage/ATA/IDEChannel.cpp b/Kernel/Storage/ATA/IDEChannel.cpp
index d01dd5a65e..e2889f68be 100644
--- a/Kernel/Storage/ATA/IDEChannel.cpp
+++ b/Kernel/Storage/ATA/IDEChannel.cpp
@@ -129,7 +129,7 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
     // This is important so that we can safely write the buffer back,
     // which could cause page faults. Note that this may be called immediately
     // before Processor::deferred_call_queue returns!
-    g_io_work->queue([this, result]() {
+    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
         dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
         MutexLocker locker(m_lock);
         VERIFY(m_current_request);
@@ -137,6 +137,11 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
         m_current_request.clear();
         current_request->complete(result);
     });
+    if (work_item_creation_result.is_error()) {
+        auto current_request = m_current_request;
+        m_current_request.clear();
+        current_request->complete(AsyncDeviceRequest::OutOfMemory);
+    }
 }

 static void print_ide_status(u8 status)
@@ -218,7 +223,7 @@ bool IDEChannel::handle_irq(RegisterState const&)
     // Now schedule reading/writing the buffer as soon as we leave the irq handler.
     // This is important so that we can safely access the buffers, which could
     // trigger page faults
-    g_io_work->queue([this]() {
+    auto work_item_creation_result = g_io_work->try_queue([this]() {
         MutexLocker locker(m_lock);
         SpinlockLocker lock(m_request_lock);
         if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
@@ -249,6 +254,11 @@ bool IDEChannel::handle_irq(RegisterState const&)
             }
         }
     });
+    if (work_item_creation_result.is_error()) {
+        auto current_request = m_current_request;
+        m_current_request.clear();
+        current_request->complete(AsyncDeviceRequest::OutOfMemory);
+    }
     return true;
 }

diff --git a/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp b/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
index 95842165cb..105c4a6251 100644
--- a/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
+++ b/Kernel/Storage/NVMe/NVMeInterruptQueue.cpp
@@ -33,7 +33,7 @@ void NVMeInterruptQueue::complete_current_request(u16 status)
 {
     VERIFY(m_request_lock.is_locked());

-    g_io_work->queue([this, status]() {
+    auto work_item_creation_result = g_io_work->try_queue([this, status]() {
         SpinlockLocker lock(m_request_lock);
         auto current_request = m_current_request;
         m_current_request.clear();
@@ -53,5 +53,10 @@ void NVMeInterruptQueue::complete_current_request(u16 status)
         current_request->complete(AsyncDeviceRequest::Success);
         return;
     });
+    if (work_item_creation_result.is_error()) {
+        auto current_request = m_current_request;
+        m_current_request.clear();
+        current_request->complete(AsyncDeviceRequest::OutOfMemory);
+    }
 }
 }
diff --git a/Kernel/WorkQueue.h b/Kernel/WorkQueue.h
index 3337a1a290..a2f1b88584 100644
--- a/Kernel/WorkQueue.h
+++ b/Kernel/WorkQueue.h
@@ -7,6 +7,7 @@
 #pragma once

+#include <AK/Error.h>
 #include
 #include
 #include

@@ -23,23 +24,29 @@ class WorkQueue {
 public:
     static void initialize();

-    void queue(void (*function)(void*), void* data = nullptr, void (*free_data)(void*) = nullptr)
+    ErrorOr<void> try_queue(void (*function)(void*), void* data = nullptr, void (*free_data)(void*) = nullptr)
     {
-        auto* item = new WorkItem; // TODO: use a pool
+        auto item = new (nothrow) WorkItem; // TODO: use a pool
+        if (!item)
+            return Error::from_errno(ENOMEM);
         item->function = [function, data, free_data] {
             function(data);
             if (free_data)
                 free_data(data);
         };
         do_queue(item);
+        return {};
     }

     template<typename Function>
-    void queue(Function function)
+    ErrorOr<void> try_queue(Function function)
     {
-        auto* item = new WorkItem; // TODO: use a pool
+        auto item = new (nothrow) WorkItem; // TODO: use a pool
+        if (!item)
+            return Error::from_errno(ENOMEM);
         item->function = Function(function);
         do_queue(item);
+        return {};
     }

 private:
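
The non-lambda try_queue() overload added above also accepts a context
pointer and an optional deleter. A minimal usage sketch, assuming a
caller that returns ErrorOr<void>; FlushContext and its field are made
up for illustration:

    struct FlushContext {
        u32 sector;
    };

    auto* context = new (nothrow) FlushContext { .sector = 42 };
    if (!context)
        return Error::from_errno(ENOMEM);
    // Captureless lambdas convert to the plain function pointers the
    // overload expects; free_data deletes the context after the work runs.
    TRY(g_io_work->try_queue(
        [](void* data) {
            dbgln("Flushing sector {}", static_cast<FlushContext*>(data)->sector);
        },
        context,
        [](void* data) { delete static_cast<FlushContext*>(data); }));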