1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 16:17:45 +00:00

Kernel: Allow propagation of WorkQueue item allocation failures

In most cases it's safe to abort the requested operation and move on.
However, in some places it's not yet clear how to handle these failures;
there, we use the MUST() wrapper to force a kernel panic for now.
This commit is contained in:
Liav A 2022-03-06 22:07:04 +02:00 committed by Linus Groh
parent 02566d8091
commit 1462211ccf
9 changed files with 75 additions and 19 deletions

View file

@ -151,7 +151,8 @@ void Console::process_control_message(ControlMessage message)
{
switch (message.event) {
case (u16)ControlEvent::DeviceAdd: {
g_io_work->queue([message, this]() -> void {
// FIXME: Do something sanely here if we can't allocate a work queue?
MUST(g_io_work->try_queue([message, this]() -> void {
u32 id = message.id;
if (id >= m_ports.size()) {
dbgln("Device provided an invalid port number {}. max_nr_ports: {}", id, m_ports.size());
@ -172,7 +173,7 @@ void Console::process_control_message(ControlMessage message)
.value = (u16)ControlMessage::Status::Success
};
write_control_message(ready_event);
});
}));
break;
}

View file

@ -31,6 +31,7 @@ public:
Success,
Failure,
MemoryFault,
OutOfMemory,
Cancelled
};

View file

@ -71,9 +71,10 @@ void PS2KeyboardDevice::irq_handle_byte_read(u8 byte)
break;
default:
if ((m_modifiers & Mod_Alt) != 0 && ch >= 2 && ch <= ConsoleManagement::s_max_virtual_consoles + 1) {
g_io_work->queue([ch]() {
// FIXME: Do something sanely here if we can't allocate a work queue?
MUST(g_io_work->try_queue([ch]() {
ConsoleManagement::the().switch_to(ch - 0x02);
});
}));
}
key_state_changed(ch, pressed);
}

View file

@ -65,10 +65,11 @@ void Console::enqueue_refresh_timer()
.width = (u32)rect.width(),
.height = (u32)rect.height(),
};
g_io_work->queue([this, dirty_rect]() {
// FIXME: Do something sanely here if we can't allocate a work queue?
MUST(g_io_work->try_queue([this, dirty_rect]() {
m_framebuffer_device->flush_dirty_window(dirty_rect, m_framebuffer_device->current_buffer());
m_dirty_rect.clear();
});
}));
}
enqueue_refresh_timer();
});

View file

@ -70,13 +70,23 @@ void AHCIPort::handle_interrupt()
if ((m_port_registers.ssts & 0xf) != 3 && m_connected_device) {
m_connected_device->prepare_for_unplug();
StorageManagement::the().remove_device(*m_connected_device);
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
m_connected_device.clear();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
} else {
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
reset();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
return;
}
@ -86,15 +96,25 @@ void AHCIPort::handle_interrupt()
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::INF)) {
// We need to defer the reset, because we can receive interrupts when
// resetting the device.
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
reset();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::IF) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::TFE) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBD) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBF)) {
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
recover_from_fatal_error();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::DHR) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::PS)) {
@ -106,7 +126,7 @@ void AHCIPort::handle_interrupt()
if (!m_current_request) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled, probably identify request", representative_port_index());
} else {
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled", representative_port_index());
MutexLocker locker(m_lock);
VERIFY(m_current_request);
@ -128,6 +148,11 @@ void AHCIPort::handle_interrupt()
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request success", representative_port_index());
complete_current_request(AsyncDeviceRequest::Success);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
}

View file

@ -121,7 +121,7 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
g_io_work->queue([this, result]() {
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
SpinlockLocker lock(m_request_lock);
VERIFY(m_current_request);
@ -145,6 +145,11 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
lock.unlock();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)

View file

@ -129,7 +129,7 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
g_io_work->queue([this, result]() {
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
@ -137,6 +137,11 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
m_current_request.clear();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
static void print_ide_status(u8 status)
@ -218,7 +223,7 @@ bool IDEChannel::handle_irq(RegisterState const&)
// Now schedule reading/writing the buffer as soon as we leave the irq handler.
// This is important so that we can safely access the buffers, which could
// trigger page faults
g_io_work->queue([this]() {
auto work_item_creation_result = g_io_work->try_queue([this]() {
MutexLocker locker(m_lock);
SpinlockLocker lock(m_request_lock);
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
@ -249,6 +254,11 @@ bool IDEChannel::handle_irq(RegisterState const&)
}
}
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return true;
}

View file

@ -33,7 +33,7 @@ void NVMeInterruptQueue::complete_current_request(u16 status)
{
VERIFY(m_request_lock.is_locked());
g_io_work->queue([this, status]() {
auto work_item_creation_result = g_io_work->try_queue([this, status]() {
SpinlockLocker lock(m_request_lock);
auto current_request = m_current_request;
m_current_request.clear();
@ -53,5 +53,10 @@ void NVMeInterruptQueue::complete_current_request(u16 status)
current_request->complete(AsyncDeviceRequest::Success);
return;
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
}

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/Error.h>
#include <AK/IntrusiveList.h>
#include <Kernel/Forward.h>
#include <Kernel/Locking/SpinlockProtected.h>
@ -23,23 +24,29 @@ class WorkQueue {
public:
static void initialize();
void queue(void (*function)(void*), void* data = nullptr, void (*free_data)(void*) = nullptr)
ErrorOr<void> try_queue(void (*function)(void*), void* data = nullptr, void (*free_data)(void*) = nullptr)
{
auto* item = new WorkItem; // TODO: use a pool
auto item = new (nothrow) WorkItem; // TODO: use a pool
if (!item)
return Error::from_errno(ENOMEM);
item->function = [function, data, free_data] {
function(data);
if (free_data)
free_data(data);
};
do_queue(item);
return {};
}
template<typename Function>
void queue(Function function)
ErrorOr<void> try_queue(Function function)
{
auto* item = new WorkItem; // TODO: use a pool
auto item = new (nothrow) WorkItem; // TODO: use a pool
if (!item)
return Error::from_errno(ENOMEM);
item->function = Function(function);
do_queue(item);
return {};
}
private: