Kernel: Add individual struct definitions for NVMeSubmission
Only a generic struct definition was present for NVMeSubmission. To improve type safety and clarity, add a union of command-specific structs to NVMeSubmission so that each command uses the definition applicable to it.
parent ba7846647c
commit 567b3a4810
5 changed files with 94 additions and 39 deletions
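
For context, the change swaps hand-packed command dwords for a typed, per-command view. A minimal before/after illustration (field names taken from the diff below):

    // Before: opcode-specific values were packed into generic dwords by hand.
    NVMeSubmission sub {};
    sub.op = OP_ADMIN_IDENTIFY;
    sub.cdw10 = NVMe_CNS_ID_ACTIVE_NS & 0xff; // CNS is the low byte of CDW10

    // After: the anonymous union in NVMeSubmission exposes a typed view.
    sub.op = OP_ADMIN_IDENTIFY;
    sub.identify.cns = NVMe_CNS_ID_ACTIVE_NS & 0xff;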
@@ -42,6 +42,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize()
     PCI::enable_bus_mastering(m_pci_device_id.address());
     m_bar = PCI::get_BAR0(m_pci_device_id.address()) & BAR_ADDR_MASK;
     static_assert(sizeof(ControllerRegister) == REG_SQ0TDBL_START);
+    static_assert(sizeof(NVMeSubmission) == (1 << SQ_WIDTH));

     // Map only until doorbell register for the controller
     // Queues will individually map the doorbell register respectively
@@ -163,8 +164,8 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
    NVMeSubmission sub {};
    u16 status = 0;
    sub.op = OP_ADMIN_IDENTIFY;
-   sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
-   sub.cdw10 = NVMe_CNS_ID_ACTIVE_NS & 0xff;
+   sub.identify.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
+   sub.identify.cns = NVMe_CNS_ID_ACTIVE_NS & 0xff;
    status = submit_admin_command(sub, true);
    if (status) {
        dmesgln("Failed to identify active namespace command");
@@ -185,9 +186,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
        if (nsid == 0)
            break;
        sub.op = OP_ADMIN_IDENTIFY;
-       sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
-       sub.cdw10 = NVMe_CNS_ID_NS & 0xff;
-       sub.nsid = nsid;
+       sub.identify.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
+       sub.identify.cns = NVMe_CNS_ID_NS & 0xff;
+       sub.identify.nsid = nsid;
        status = submit_admin_command(sub, true);
        if (status) {
            dmesgln("Failed identify namespace with nsid {}", nsid);
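
The renamed fields line up with the NVMe specification's submission-queue-entry layout: for Identify, CNS occupies the low byte of CDW10, which is byte 40 of the 64-byte entry. A standalone sanity check of that overlay (a sketch, not part of this commit; plain stdint types stand in for the AK wrappers):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical flat mirror of op/flags/cmdid followed by the identify view.
    struct [[gnu::packed]] IdentifySqeView {
        uint8_t op;        // CDW0, byte 0
        uint8_t flags;     // CDW0, byte 1
        uint16_t cmdid;    // CDW0, bytes 2-3
        uint32_t nsid;     // bytes 4-7
        uint64_t rsvd1[2]; // CDW2-3 and metadata pointer, bytes 8-23
        uint64_t prp1;     // bytes 24-31
        uint64_t prp2;     // bytes 32-39
        uint8_t cns;       // low byte of CDW10, byte 40
    };
    static_assert(offsetof(IdentifySqeView, cns) == 40, "CNS must land in CDW10");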
@@ -296,7 +297,6 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(u8 irq)

 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
 {
-    NVMeSubmission sub {};
     OwnPtr<Memory::Region> cq_dma_region;
     NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
@@ -304,8 +304,6 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
    auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
    auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);

-   static_assert(sizeof(NVMeSubmission) == (1 << SQ_WIDTH));
-
    {
        auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "IO CQ queue", Memory::Region::Access::ReadWrite, cq_dma_pages));
        cq_dma_region = move(buffer);
@@ -321,25 +319,29 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
    }

    {
+       NVMeSubmission sub {};
        sub.op = OP_ADMIN_CREATE_COMPLETION_QUEUE;
-       sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
+       sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
+       sub.create_cq.cqid = qid;
        // The queue size is 0 based
-       sub.cdw10 = AK::convert_between_host_and_little_endian(((IO_QUEUE_SIZE - 1) << 16 | qid));
+       sub.create_cq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
        auto flags = QUEUE_IRQ_ENABLED | QUEUE_PHY_CONTIGUOUS;
        // TODO: Eventually move to MSI.
        // For now using pin based interrupts. Clear the first 16 bits
        // to use pin-based interrupts.
-       sub.cdw11 = AK::convert_between_host_and_little_endian(flags & 0xFFFF);
+       sub.create_cq.cq_flags = AK::convert_between_host_and_little_endian(flags & 0xFFFF);
        submit_admin_command(sub, true);
    }
    {
+       NVMeSubmission sub {};
        sub.op = OP_ADMIN_CREATE_SUBMISSION_QUEUE;
-       sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+       sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+       sub.create_sq.sqid = qid;
        // The queue size is 0 based
-       sub.cdw10 = AK::convert_between_host_and_little_endian(((IO_QUEUE_SIZE - 1) << 16 | qid));
+       sub.create_sq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
        auto flags = QUEUE_IRQ_ENABLED | QUEUE_PHY_CONTIGUOUS;
-       // The qid used below points to the completion queue qid
-       sub.cdw11 = AK::convert_between_host_and_little_endian(qid << 16 | flags);
+       sub.create_sq.cqid = qid;
+       sub.create_sq.sq_flags = AK::convert_between_host_and_little_endian(flags);
        submit_admin_command(sub, true);
    }

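
The queue-creation rewrite is bit-for-bit equivalent to the old dword packing: in CDW10 the queue id sits in bits 15:0 and the zero-based queue size in bits 31:16, which is exactly where the packed cqid/sqid and qsize fields fall. A quick check of the equivalence (a sketch with plain integers in place of the LittleEndian wrappers; the IO_QUEUE_SIZE value is illustrative):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        constexpr uint32_t IO_QUEUE_SIZE = 64; // illustrative
        uint16_t qid = 1;

        uint32_t cdw10 = (IO_QUEUE_SIZE - 1) << 16 | qid; // old encoding

        uint16_t cqid = qid;                // new: low 16 bits of CDW10
        uint16_t qsize = IO_QUEUE_SIZE - 1; // new: high 16 bits of CDW10

        assert((cdw10 & 0xFFFF) == cqid);
        assert((cdw10 >> 16) == qsize);
    }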
@@ -42,7 +42,7 @@ public:
    bool start_controller();
    u32 get_admin_q_dept();

-   u16 submit_admin_command(struct NVMeSubmission& sub, bool sync = false)
+   u16 submit_admin_command(NVMeSubmission& sub, bool sync = false)
    {
        // First queue is always the admin queue
        if (sync) {
@@ -9,9 +9,6 @@
 #include <AK/Endian.h>
 #include <AK/Types.h>

-struct NVMeCompletion;
-struct NVMeSubmission;
-
 struct ControllerRegister {
    u64 cap;
    u32 vs;
@@ -129,7 +126,7 @@ enum IOCommandOpcode {
 static constexpr u8 QUEUE_PHY_CONTIGUOUS = (1 << 0);
 static constexpr u8 QUEUE_IRQ_ENABLED = (1 << 1);

-struct NVMeCompletion {
+struct [[gnu::packed]] NVMeCompletion {
    LittleEndian<u32> cmd_spec;
    LittleEndian<u32> res;

@@ -140,18 +137,15 @@ struct NVMeCompletion {
    LittleEndian<u16> status; /* did the command fail, and if so, why? */
 };

-struct DataPtr {
+struct [[gnu::packed]] DataPtr {
    LittleEndian<u64> prp1;
    LittleEndian<u64> prp2;
 };

-struct NVMeSubmission {
-   LittleEndian<u8> op;
-   LittleEndian<u8> flags;
-   LittleEndian<u16> cmdid;
+struct [[gnu::packed]] NVMeGenericCmd {
    LittleEndian<u32> nsid;
    LittleEndian<u64> rsvd;
-   LittleEndian<u64> meta_ptr;
+   LittleEndian<u64> metadata;
    struct DataPtr data_ptr;
    LittleEndian<u32> cdw10;
    LittleEndian<u32> cdw11;
@@ -160,3 +154,64 @@ struct NVMeSubmission {
    LittleEndian<u32> cdw14;
    LittleEndian<u32> cdw15;
 };
+
+struct [[gnu::packed]] NVMeRWCmd {
+   LittleEndian<u32> nsid;
+   LittleEndian<u64> rsvd;
+   LittleEndian<u64> metadata;
+   struct DataPtr data_ptr;
+   LittleEndian<u64> slba;
+   LittleEndian<u16> length;
+   LittleEndian<u16> control;
+   LittleEndian<u32> dsmgmt;
+   LittleEndian<u32> reftag;
+   LittleEndian<u16> apptag;
+   LittleEndian<u16> appmask;
+};
+
+struct [[gnu::packed]] NVMeIdentifyCmd {
+   LittleEndian<u32> nsid;
+   LittleEndian<u64> rsvd1[2];
+   struct DataPtr data_ptr;
+   u8 cns;
+   u8 rsvd2;
+   LittleEndian<u16> ctrlid;
+   u8 rsvd3[3];
+   u8 csi;
+   u64 rsvd4[2];
+};
+
+struct [[gnu::packed]] NVMeCreateCQCmd {
+   u32 rsvd1[5];
+   LittleEndian<u64> prp1;
+   u64 rsvd2;
+   LittleEndian<u16> cqid;
+   LittleEndian<u16> qsize;
+   LittleEndian<u16> cq_flags;
+   LittleEndian<u16> irq_vector;
+   u64 rsvd12[2];
+};
+
+struct [[gnu::packed]] NVMeCreateSQCmd {
+   u32 rsvd1[5];
+   LittleEndian<u64> prp1;
+   u64 rsvd2;
+   LittleEndian<u16> sqid;
+   LittleEndian<u16> qsize;
+   LittleEndian<u16> sq_flags;
+   LittleEndian<u16> cqid;
+   u64 rsvd12[2];
+};
+
+struct [[gnu::packed]] NVMeSubmission {
+   u8 op;
+   u8 flags;
+   LittleEndian<u16> cmdid;
+   union [[gnu::packed]] {
+       NVMeGenericCmd generic;
+       NVMeIdentifyCmd identify;
+       NVMeRWCmd rw;
+       NVMeCreateCQCmd create_cq;
+       NVMeCreateSQCmd create_sq;
+   };
+};
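
Each command view above is 60 bytes, so together with the 4-byte CDW0 header (op, flags, cmdid) the union keeps NVMeSubmission at the 64-byte submission-queue-entry size the controller expects; that is what the sizeof(NVMeSubmission) == (1 << SQ_WIDTH) assert in initialize() enforces, assuming SQ_WIDTH is 6. The invariant, spelled out (these asserts are illustrative, not part of the commit; sizes follow from the packed field widths above):

    static_assert(sizeof(NVMeGenericCmd) == 60);
    static_assert(sizeof(NVMeRWCmd) == 60);
    static_assert(sizeof(NVMeIdentifyCmd) == 60);
    static_assert(sizeof(NVMeCreateCQCmd) == 60);
    static_assert(sizeof(NVMeCreateSQCmd) == 60);
    static_assert(sizeof(NVMeSubmission) == 64); // 4-byte CDW0 + 60-byte union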
@@ -93,7 +93,7 @@ bool NVMeQueue::handle_irq(const RegisterState&)
    return nr_of_processed_cqes ? true : false;
 }

-void NVMeQueue::submit_sqe(struct NVMeSubmission& sub)
+void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 {
    SpinlockLocker lock(m_sq_lock);
    // For now let's use sq tail as a unique command id.
@@ -144,12 +144,11 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
    m_current_request = request;

    sub.op = OP_NVME_READ;
-   sub.nsid = nsid;
-   sub.cdw10 = AK::convert_between_host_and_little_endian(index & 0xFFFFFFFF);
-   sub.cdw11 = AK::convert_between_host_and_little_endian(index >> 32);
+   sub.rw.nsid = nsid;
+   sub.rw.slba = AK::convert_between_host_and_little_endian(index);
    // No. of lbas is 0 based
-   sub.cdw12 = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
-   sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+   sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
+   sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));

    full_memory_barrier();
    submit_sqe(sub);
@@ -166,12 +165,11 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
        return;
    }
    sub.op = OP_NVME_WRITE;
-   sub.nsid = nsid;
-   sub.cdw10 = AK::convert_between_host_and_little_endian(index & 0xFFFFFFFF);
-   sub.cdw11 = AK::convert_between_host_and_little_endian(index >> 32);
+   sub.rw.nsid = nsid;
+   sub.rw.slba = AK::convert_between_host_and_little_endian(index);
    // No. of lbas is 0 based
-   sub.cdw12 = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
-   sub.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+   sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
+   sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));

    full_memory_barrier();
    submit_sqe(sub);
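
In the read/write paths, the 64-bit starting LBA used to be split by hand across CDW10 (low dword) and CDW11 (high dword); the LittleEndian<u64> slba field spans both dwords, so a single store replaces the pair. A small check of that equivalence (a sketch; plain integers, example LBA):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint64_t index = 0x1122334455667788ULL; // example starting LBA

        // Old encoding: two 32-bit dwords.
        uint32_t cdw10 = index & 0xFFFFFFFF;
        uint32_t cdw11 = index >> 32;

        // New encoding: one 64-bit little-endian field spanning CDW10-11.
        uint64_t slba = index;

        assert(((uint64_t)cdw11 << 32 | cdw10) == slba);
    }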
@@ -34,8 +34,8 @@ public:
    explicit NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
    bool is_admin_queue() { return m_admin_queue; };
    bool handle_irq(const RegisterState&) override;
-   void submit_sqe(struct NVMeSubmission&);
-   u16 submit_sync_sqe(struct NVMeSubmission&);
+   void submit_sqe(NVMeSubmission&);
+   u16 submit_sync_sqe(NVMeSubmission&);
    void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
    void write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
    void enable_interrupts() { enable_irq(); };