mirror of https://github.com/RGBCube/serenity
serenity/Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.cpp
Idan Horowitz 45aee20ea9 Kernel: Switch to SpinlockProtected to protect NVMeQueue's requests map
This helps ensure no one accidentally accesses m_requests without first
locking its spinlock. In fact, this change fixed such a case, since
process_cq() implicitly assumed the caller had locked the lock, which was
not the case for NVMePollQueue::submit_sqe().
2024-02-10 08:42:53 +01:00
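As context for the commit above, here is a minimal sketch of the SpinlockProtected idiom it adopts. The declaration below is illustrative, not the exact kernel member (the real map lives in NVMeQueue and its template arguments may differ); the point is that the wrapped data is only reachable through with(), which holds the spinlock for the duration of the lambda, so an unlocked access cannot compile.

// Sketch only: placeholder types and helper, not the real NVMeQueue declaration.
SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests {};

void drop_request(u16 cmdid)
{
    // with() acquires the spinlock, runs the lambda against the protected
    // map, and releases the lock on return; there is no other accessor.
    m_requests.with([cmdid](auto& requests) {
        requests.remove(cmdid);
    });
}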

/*
 * Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.h>
#include <Kernel/Tasks/WorkQueue.h>

namespace Kernel {

ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> NVMeInterruptQueue::try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
{
    auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(device, move(rw_dma_region), rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
    queue->initialize_interrupt_queue();
    return queue;
}

UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs)
    : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
    , PCI::IRQHandler(device, irq)
{
}

void NVMeInterruptQueue::initialize_interrupt_queue()
{
    enable_irq();
}

bool NVMeInterruptQueue::handle_irq(RegisterState const&)
{
    // Report whether any completion queue entries were reaped, so the IRQ
    // is only counted as handled when this queue actually had work pending.
    return process_cq() != 0;
}

void NVMeInterruptQueue::submit_sqe(NVMeSubmission& sub)
{
    // No interrupt-specific work is needed on submission; defer to the base class.
    NVMeQueue::submit_sqe(sub);
}

void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
{
    // This runs in IRQ context, so defer the actual completion work to the
    // IO work queue.
    auto work_item_creation_result = g_io_work->try_queue([this, cmdid, status]() {
        NVMeQueue::complete_current_request(cmdid, status);
    });
    if (work_item_creation_result.is_error()) {
        // Queueing the work item failed; fail the request in place rather
        // than leaving it dangling in the requests map.
        m_requests.with([cmdid, status](auto& requests) {
            auto& request_pdu = requests.get(cmdid).release_value();
            auto current_request = request_pdu.request;

            current_request->complete(AsyncDeviceRequest::OutOfMemory);
            if (request_pdu.end_io_handler)
                request_pdu.end_io_handler(status);
            request_pdu.clear();
        });
    }
}

}
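
For orientation, a hedged sketch of how these pieces fit together at interrupt time; step 3 describes NVMeQueue behavior not shown in this file.

// 1. The controller raises this queue's IRQ line.
// 2. PCI::IRQHandler dispatches to handle_irq(), which calls process_cq().
// 3. For each completed command, process_cq() invokes the virtual
//    complete_current_request(), which (above) hands the completion to
//    g_io_work so the AsyncDeviceRequest finishes outside IRQ context.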