
Kernel: Finish base implementation of VirtQueues

This commit includes a lot of small changes and additions needed to
finalize the base implementation of VirtIOQueues and VirtIODevices:
* The device-specific driver implementation now has to handle setting
up the queues it needs before letting the base device class know it
finished initialization
* Supplying buffers to VirtQueues is now done via ScatterGatherLists
instead of arbitrary buffer pointers - this ensures the addresses
handed to the device are physical and lets us follow the
specification's requirement that individual descriptors must point to
physically contiguous buffers. This can be further improved in the
future by implementing support for the Indirect-Descriptors feature
(as defined by the specification) to reduce descriptor usage for very
fragmented buffers.
* When supplying buffers to a VirtQueue the driver must supply a
(temporarily-)unique token (usually the supplied buffer's virtual
address) so that it can later tell which buffer the device has
finished processing, since a device that does not offer the
F_IN_ORDER feature may return buffers out of submission order (see
the sketch after this list)
* Device drivers now handle queue updates (supplied buffers being
returned from the device) by implementing a single pure virtual
method instead of setting a separate callback for each queue
* Two new VirtQueue methods were added to allow the device driver to
either discard or retrieve used/returned buffers from the device by
cleanly removing them from the descriptor chain (this also allows the
VirtQueue implementation to reuse the freed descriptors)
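
To illustrate the token round-trip described above, here is a minimal,
self-contained toy model in plain C++ - it is not SerenityOS code. A
queue records the caller-chosen token for each supplied chain, the
device may complete chains in any order, and the driver matches each
completion back to its own buffer through the token it gets back. The
ToyQueue class, the device_completes() helper and every other name here
are illustrative assumptions, not the kernel's actual API.

#include <cstdint>
#include <cstdio>
#include <vector>

class ToyQueue {
public:
    explicit ToyQueue(uint16_t queue_size)
        : m_tokens(queue_size, nullptr)
    {
    }

    // Driver -> device: hand over one descriptor chain, remembering the
    // caller-chosen token (usually the buffer's virtual address).
    // No bounds checking; this only demonstrates the token mechanism.
    uint16_t supply_buffer(void* token)
    {
        uint16_t head = m_next_free++;
        m_tokens[head] = token;
        return head;
    }

    // Device -> driver: the device marks chain `head` as used after
    // writing `length` bytes into it.
    void device_completes(uint16_t head, uint32_t length)
    {
        m_used.push_back({ head, length });
    }

    // Driver: pop one used chain and return its token, or nullptr if the
    // device has not returned anything yet.
    void* get_buffer(size_t* size)
    {
        if (m_used.empty()) {
            *size = 0;
            return nullptr;
        }
        auto used = m_used.back();
        m_used.pop_back();
        *size = used.length;
        void* token = m_tokens[used.head];
        m_tokens[used.head] = nullptr;
        return token;
    }

private:
    struct UsedElement {
        uint16_t head;
        uint32_t length;
    };
    std::vector<void*> m_tokens;
    std::vector<UsedElement> m_used;
    uint16_t m_next_free { 0 };
};

int main()
{
    char request_a[64];
    char request_b[64];

    ToyQueue queue(16);
    uint16_t head_a = queue.supply_buffer(request_a); // token = buffer address
    uint16_t head_b = queue.supply_buffer(request_b);

    // Without F_IN_ORDER the device may finish B before A, so submission
    // order tells the driver nothing; only the token identifies the buffer.
    queue.device_completes(head_b, 48);
    queue.device_completes(head_a, 16);

    size_t size = 0;
    while (void* token = queue.get_buffer(&size))
        printf("%s finished, %zu bytes\n", token == request_a ? "request_a" : "request_b", size);
    return 0;
}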

This also includes the necessary changes to the VirtIOConsole
implementation to match these interface changes.

Co-authored-by: Sahan <sahan.h.fernando@gmail.com>
Idan Horowitz 2021-04-15 19:39:48 +10:00 committed by Andreas Kling
parent acdd1424bc
commit d1f7a2f9a5
7 changed files with 154 additions and 85 deletions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,17 +37,21 @@ VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
size_t size_of_descriptors = sizeof(VirtIOQueueDescriptor) * queue_size;
size_t size_of_driver = sizeof(VirtIOQueueDriver) + queue_size * sizeof(u16);
size_t size_of_device = sizeof(VirtIOQueueDevice) + queue_size * sizeof(VirtIOQueueDeviceItem);
m_region = MM.allocate_contiguous_kernel_region(page_round_up(size_of_descriptors + size_of_driver + size_of_device), "VirtIO Queue", Region::Access::Read | Region::Access::Write);
if (m_region) {
// TODO: ensure alignment!!!
u8* ptr = m_region->vaddr().as_ptr();
memset(ptr, 0, m_region->size());
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr);
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors);
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver);
m_queue_region = MM.allocate_contiguous_kernel_region(page_round_up(size_of_descriptors + size_of_driver + size_of_device), "VirtIO Queue", Region::Access::Read | Region::Access::Write);
VERIFY(m_queue_region);
// TODO: ensure alignment!!!
u8* ptr = m_queue_region->vaddr().as_ptr();
memset(ptr, 0, m_queue_region->size());
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr);
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors);
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver);
m_tokens.resize(queue_size);
enable_interrupts();
for (auto i = 0; i < queue_size; i++) {
m_descriptors[i].next = (i + 1) % queue_size; // link all of the descriptors in a circle
}
enable_interrupts();
}

VirtIOQueue::~VirtIOQueue()
@@ -64,20 +68,25 @@ void VirtIOQueue::disable_interrupts()
m_driver->flags = 1;
}

bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const u8* buffer, u32 length, BufferType buffer_type)
bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const ScatterGatherList& scatter_list, BufferType buffer_type, void* token)
{
VERIFY(buffer && length > 0);
VERIFY(m_free_buffers > 0);
VERIFY(scatter_list.length() && scatter_list.length() <= m_free_buffers);
m_free_buffers -= scatter_list.length();
auto descriptor_index = m_free_head;
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type);
m_descriptors[descriptor_index].address = reinterpret_cast<u64>(buffer);
m_descriptors[descriptor_index].length = length;
auto last_index = descriptor_index;
scatter_list.for_each_entry([&](auto paddr, auto size) {
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type) | VIRTQ_DESC_F_NEXT;
m_descriptors[descriptor_index].address = static_cast<u64>(paddr);
m_descriptors[descriptor_index].length = static_cast<u32>(size);
last_index = descriptor_index;
descriptor_index = m_descriptors[descriptor_index].next; // ensure we place the buffer in chain order
});
m_descriptors[last_index].flags &= ~(VIRTQ_DESC_F_NEXT); // last descriptor in chain doesn't have a next descriptor
m_free_buffers--;
m_free_head = (m_free_head + 1) % m_queue_size;
m_driver->rings[m_driver_index_shadow % m_queue_size] = descriptor_index; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
m_driver->rings[m_driver_index_shadow % m_queue_size] = m_free_head; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
m_tokens[m_free_head] = token;
m_free_head = descriptor_index;
full_memory_barrier();
@@ -89,18 +98,51 @@ bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const u8* buffer, u32 lengt
auto device_flags = m_device->flags;
return !(device_flags & 1); // if bit 1 is enabled the device disabled interrupts
}

bool VirtIOQueue::new_data_available() const
{
return m_device->index != m_used_tail;
}

bool VirtIOQueue::handle_interrupt()
void* VirtIOQueue::get_buffer(size_t* size)
{
if (!new_data_available())
return false;
if (!new_data_available()) {
*size = 0;
return nullptr;
}
if (on_data_available)
on_data_available();
return true;
full_memory_barrier();
auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
*size = m_device->rings[m_used_tail % m_queue_size].length;
m_used_tail++;
auto token = m_tokens[descriptor_index];
pop_buffer(descriptor_index);
return token;
}

void VirtIOQueue::discard_used_buffers()
{
size_t size;
while (get_buffer(&size)) {
}
}

void VirtIOQueue::pop_buffer(u16 descriptor_index)
{
m_tokens[descriptor_index] = nullptr;
auto i = descriptor_index;
while (m_descriptors[i].flags & VIRTQ_DESC_F_NEXT) {
m_free_buffers++;
i = m_descriptors[i].next;
}
m_free_buffers++; // the last descriptor in the chain doesn't have the NEXT flag
m_descriptors[i].next = m_free_head; // prepend the popped descriptors to the free chain
m_free_head = descriptor_index;
}
}
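
The pop_buffer() logic above (walking a returned chain and splicing it
back onto the free list so later supply_buffer() calls can reuse its
descriptors) can be pictured with a small standalone model. This is a
hedged sketch in plain C++, not kernel code: ToyDescriptorPool,
allocate_chain() and free_chain() are invented names, and the
descriptor struct is reduced to the two fields the free-chain
bookkeeping needs.

#include <cstdint>
#include <cstdio>
#include <vector>

static constexpr uint16_t F_NEXT = 1; // stands in for VIRTQ_DESC_F_NEXT

struct Descriptor {
    uint16_t flags { 0 };
    uint16_t next { 0 };
};

struct ToyDescriptorPool {
    std::vector<Descriptor> descriptors;
    uint16_t free_head { 0 };
    uint16_t free_count;

    explicit ToyDescriptorPool(uint16_t size)
        : descriptors(size)
        , free_count(size)
    {
        for (uint16_t i = 0; i < size; i++)
            descriptors[i].next = (i + 1) % size; // link everything into one circle
    }

    // Take `count` descriptors off the free chain and mark them as one
    // chain (no free_count check; this is only a demonstration).
    uint16_t allocate_chain(uint16_t count)
    {
        uint16_t head = free_head;
        uint16_t index = head;
        uint16_t last = head;
        for (uint16_t i = 0; i < count; i++) {
            descriptors[index].flags = F_NEXT;
            last = index;
            index = descriptors[index].next;
        }
        descriptors[last].flags = 0; // last descriptor in the chain has no NEXT flag
        free_head = index;
        free_count -= count;
        return head;
    }

    // Rough equivalent of pop_buffer(): walk the returned chain, count its
    // descriptors back into the free pool, and prepend it to the free chain.
    void free_chain(uint16_t head)
    {
        uint16_t i = head;
        while (descriptors[i].flags & F_NEXT) {
            free_count++;
            i = descriptors[i].next;
        }
        free_count++; // the tail descriptor has no NEXT flag
        descriptors[i].next = free_head;
        free_head = head;
    }
};

int main()
{
    ToyDescriptorPool pool(8);
    uint16_t chain = pool.allocate_chain(3); // e.g. a 3-entry scatter/gather buffer
    printf("after allocate: head=%u free=%u\n", (unsigned)pool.free_head, (unsigned)pool.free_count);
    pool.free_chain(chain); // the buffer came back from the device
    printf("after free:     head=%u free=%u\n", (unsigned)pool.free_head, (unsigned)pool.free_count);
    return 0;
}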