mirror of https://github.com/RGBCube/serenity (synced 2025-10-31 16:42:44 +00:00)
0c5d6c6c47
Add an initialize_interrupt_queue() helper to enable_irq() instead of doing it as part of object construction, since enabling the IRQ can fail. This is similar to how AHCI initializes its interrupt.
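For context, here is a minimal sketch of the fallible two-phase construction pattern this commit applies, using a hypothetical Widget type (the real file below uses adopt_nonnull_lock_ref_or_enomem and enables an IRQ in its helper):

#include <AK/Error.h>
#include <AK/NonnullOwnPtr.h>

class Widget {
public:
    // The constructor stays infallible; any step that can fail runs
    // afterwards, where try_create() can propagate the error to the caller.
    static ErrorOr<NonnullOwnPtr<Widget>> try_create()
    {
        auto widget = TRY(adopt_nonnull_own_or_enomem(new (nothrow) Widget()));
        widget->initialize(); // e.g. enable_irq(); deliberately not in the constructor
        return widget;
    }

private:
    Widget() = default;
    void initialize() { /* fallible setup goes here */ }
};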
93 lines · 3.8 KiB · C++
/*
 * Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Storage/NVMe/NVMeInterruptQueue.h>
#include <Kernel/WorkQueue.h>

namespace Kernel {

ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> NVMeInterruptQueue::try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
    auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(device, move(rw_dma_region), rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
    queue->initialize_interrupt_queue();
    return queue;
}

UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
    : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
    , PCIIRQHandler(device, irq)
{
}

void NVMeInterruptQueue::initialize_interrupt_queue()
{
    enable_irq();
}

bool NVMeInterruptQueue::handle_irq(RegisterState const&)
{
    SpinlockLocker lock(m_request_lock);
    return process_cq() ? true : false;
}

void NVMeInterruptQueue::submit_sqe(NVMeSubmission& sub)
{
    NVMeQueue::submit_sqe(sub);
}

void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
{
    auto work_item_creation_result = g_io_work->try_queue([this, cmdid, status]() {
        SpinlockLocker lock(m_request_lock);
        auto& request_pdu = m_requests.get(cmdid).release_value();
        auto current_request = request_pdu.request;
        AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
        ScopeGuard guard = [req_result, status, &request_pdu, &lock] {
            // FIXME: We should unlock at the end of this function to make sure no new request is inserted
            //  before we complete the request and call end_io_handler, but that results in a deadlock.
            //  For now this is avoided by asserting the `used` field while inserting.
            lock.unlock();
            if (request_pdu.request)
                request_pdu.request->complete(req_result);
            if (request_pdu.end_io_handler)
                request_pdu.end_io_handler(status);
            request_pdu.used = false;
        };

        // There can be a submission without any request associated with it, such as with
        // admin queue commands during init. If there is no request, we are done.
        if (!current_request)
            return;

        if (status) {
            req_result = AsyncBlockDeviceRequest::Failure;
            return;
        }

        if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
            if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
                req_result = AsyncBlockDeviceRequest::MemoryFault;
                return;
            }
        }
        return;
    });

    if (work_item_creation_result.is_error()) {
        SpinlockLocker lock(m_request_lock);
        auto& request_pdu = m_requests.get(cmdid).release_value();
        auto current_request = request_pdu.request;

        current_request->complete(AsyncDeviceRequest::OutOfMemory);
        if (request_pdu.end_io_handler)
            request_pdu.end_io_handler(status);
        request_pdu.used = false;
    }
}
}
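For illustration, a hypothetical call site (the actual caller lives in the NVMe controller sources, which are not part of this page; the argument names are placeholders):

auto queue = TRY(NVMeInterruptQueue::try_create(device, move(rw_dma_region), rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs)));
// At this point try_create() has already run initialize_interrupt_queue(),
// so the IRQ line is enabled and completions arrive through handle_irq().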