1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 23:07:35 +00:00

Kernel: Move VirtIO code away from using a scatter gather list

Currently, when passing buffers into VirtIOQueues, we use scatter-gather
lists, which contain an internal vector of buffers. This vector is
allocated, filled, and then destroyed whenever we try to provide buffers
into a virtqueue, which happens a lot in performance-critical code
(the main transport mechanism for certain paravirtualized devices).

This commit moves it over to using VirtIOQueueChains and building the
chain in place in the VirtIOQueue. Also included are a bunch of fixups
for the VirtIO Console device, making it use an internal VM::RingBuffer
instead.
This commit is contained in:
Sahan Fernando 2021-04-24 11:05:51 +10:00 committed by Andreas Kling
parent 13d5cdcd08
commit ed0e7b53a5
11 changed files with 312 additions and 159 deletions

View file

@ -4,7 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/StdLib.h>
#include <AK/Atomic.h>
#include <Kernel/VirtIO/VirtIOQueue.h>
namespace Kernel {
@ -25,10 +25,9 @@ VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr);
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors);
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver);
m_tokens.resize(queue_size);
for (auto i = 0; i < queue_size; i++) {
m_descriptors[i].next = (i + 1) % queue_size; // link all of the descriptors in a circle
for (auto i = 0; i + 1 < queue_size; i++) {
m_descriptors[i].next = i + 1; // link all of the descriptors in a line
}
enable_interrupts();
@ -40,94 +39,157 @@ VirtIOQueue::~VirtIOQueue()
// Allow the device to deliver used-buffer notifications again by clearing the
// driver-area flags (the counterpart, disable_interrupts(), sets them to 1).
void VirtIOQueue::enable_interrupts()
{
    ScopedSpinLock queue_locker(m_lock);
    m_driver->flags = 0;
}
// Ask the device to suppress used-buffer notifications by setting the
// driver-area flags (cleared again by enable_interrupts()).
void VirtIOQueue::disable_interrupts()
{
    ScopedSpinLock queue_locker(m_lock);
    m_driver->flags = 1;
}
// NOTE(review): this is the pre-change version of the buffer-supply path (removed
// by this commit in favour of VirtIOQueueChain), as rendered by the diff view.
//
// Hands a scatter-gather list of physical buffers to the device: walks the free
// descriptor list, fills one descriptor per scatter-gather entry (linked with
// VIRTQ_DESC_F_NEXT), publishes the head of the chain in the driver ring, and
// advances the driver index. Returns true when the device should be notified.
bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const ScatterGatherRefList& scatter_list, BufferType buffer_type, void* token)
{
// Caller must supply at least one entry and there must be enough free descriptors.
VERIFY(scatter_list.length() && scatter_list.length() <= m_free_buffers);
m_free_buffers -= scatter_list.length();
auto descriptor_index = m_free_head;
auto last_index = descriptor_index;
scatter_list.for_each_entry([&](auto paddr, auto size) {
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type) | VIRTQ_DESC_F_NEXT;
m_descriptors[descriptor_index].address = static_cast<u64>(paddr);
m_descriptors[descriptor_index].length = static_cast<u32>(size);
last_index = descriptor_index;
descriptor_index = m_descriptors[descriptor_index].next; // ensure we place the buffer in chain order
});
m_descriptors[last_index].flags &= ~(VIRTQ_DESC_F_NEXT); // last descriptor in chain doesn't have a next descriptor
m_driver->rings[m_driver_index_shadow % m_queue_size] = m_free_head; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
m_tokens[m_free_head] = token;
m_free_head = descriptor_index;
// Barrier: the ring entry must be visible to the device before the index bump publishes it.
full_memory_barrier();
m_driver_index_shadow++;
m_driver->index++;
// Barrier before reading the device flags that decide whether to notify.
full_memory_barrier();
auto device_flags = m_device->flags;
return !(device_flags & 1); // bit 0 (VIRTQ_USED_F_NO_NOTIFY, value 1) set means the device asked us not to notify it
}
// Returns whether the device has published used buffers we have not consumed yet.
// NOTE(review): the diff view interleaves both versions of the body: the first
// `return` is the removed pre-change line; the three lines after it (unreachable
// as pasted here) are the replacement, which reads both indices with relaxed
// atomic loads instead of plain accesses.
bool VirtIOQueue::new_data_available() const
{
return m_device->index != m_used_tail;
const auto index = AK::atomic_load(&m_device->index, AK::MemoryOrder::memory_order_relaxed);
const auto used_tail = AK::atomic_load(&m_used_tail, AK::MemoryOrder::memory_order_relaxed);
return index != used_tail;
}
// NOTE(review): the diff view interleaves the removed get_buffer() with its
// replacement pop_used_buffer_chain(); marker comments below label which
// version each run of lines belongs to.
// old signature (token/out-parameter based):
void* VirtIOQueue::get_buffer(size_t* size)
// new signature: returns a VirtIOQueueChain describing the used descriptor chain,
// with the device-reported used length in `used`.
VirtIOQueueChain VirtIOQueue::pop_used_buffer_chain(size_t& used)
{
VERIFY(m_lock.is_locked());
if (!new_data_available()) {
// old: report "no data" via out-parameter and null token
*size = 0;
return nullptr;
// new: report "no data" via the reference and an empty chain
used = 0;
return VirtIOQueueChain(*this);
}
// Barrier before reading the used-ring entry the device just published.
full_memory_barrier();
// old:
auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
*size = m_device->rings[m_used_tail % m_queue_size].length;
// new:
// Determine used length
used = m_device->rings[m_used_tail % m_queue_size].length;
// Determine start, end and number of nodes in chain
auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
size_t length_of_chain = 1;
auto last_index = descriptor_index;
while (m_descriptors[last_index].flags & VIRTQ_DESC_F_NEXT) {
++length_of_chain;
last_index = m_descriptors[last_index].next;
}
// We are now done with this buffer chain
m_used_tail++;
// old: return the stored token and recycle the descriptors immediately
auto token = m_tokens[descriptor_index];
pop_buffer(descriptor_index);
return token;
// new: hand back a chain object; the caller releases the slots explicitly
return VirtIOQueueChain(*this, descriptor_index, last_index, length_of_chain);
}
// Drains and recycles every used-buffer chain the device has returned.
// NOTE(review): diff rendering — the removed token-based loop and the new
// chain-based loop are interleaved below; markers label each version.
void VirtIOQueue::discard_used_buffers()
{
// old:
size_t size;
while (!get_buffer(&size)) {
// new:
VERIFY(m_lock.is_locked());
size_t used;
for (auto buffer = pop_used_buffer_chain(used); !buffer.is_empty(); buffer = pop_used_buffer_chain(used)) {
buffer.release_buffer_slots_to_queue();
}
}
// NOTE(review): diff rendering — this span interleaves the removed pop_buffer()
// with the new reclaim_buffer_chain(), has_free_slots() and take_free_slot();
// markers below label which version each run of lines belongs to.
// old signature:
void VirtIOQueue::pop_buffer(u16 descriptor_index)
// new: splices an entire descriptor chain back onto the free list in O(1),
// using the precomputed chain start/end/length.
void VirtIOQueue::reclaim_buffer_chain(u16 chain_start_index, u16 chain_end_index, size_t length_of_chain)
{
// old: drop the stored token for this chain
m_tokens[descriptor_index] = nullptr;
// new:
VERIFY(m_lock.is_locked());
m_descriptors[chain_end_index].next = m_free_head;
m_free_head = chain_start_index;
m_free_buffers += length_of_chain;
}
// old: walk the chain, counting descriptors as they are freed
auto i = descriptor_index;
while (m_descriptors[i].flags & VIRTQ_DESC_F_NEXT) {
m_free_buffers++;
i = m_descriptors[i].next;
// new: lock-free query of the free-descriptor count (relaxed atomic load)
bool VirtIOQueue::has_free_slots() const
{
const auto free_buffers = AK::atomic_load(&m_free_buffers, AK::MemoryOrder::memory_order_relaxed);
return free_buffers > 0;
}
// new: pops one descriptor off the free list; empty Optional when none are free
Optional<u16> VirtIOQueue::take_free_slot()
{
VERIFY(m_lock.is_locked());
if (has_free_slots()) {
auto descriptor_index = m_free_head;
m_free_head = m_descriptors[descriptor_index].next;
--m_free_buffers;
return descriptor_index;
} else {
return {};
}
// old: tail of pop_buffer()
m_free_buffers++; // the last descriptor in the chain doesn't have the NEXT flag
m_descriptors[i].next = m_free_head; // prepend the popped descriptors to the free chain
m_free_head = descriptor_index;
}
// NOTE(review): diff rendering — can_write() was replaced by should_notify();
// markers below label which version each line belongs to.
// old signature:
bool VirtIOQueue::can_write() const
// new signature:
bool VirtIOQueue::should_notify() const
{
// old body: "can write" meant free descriptors were available
return m_free_buffers > 0;
// new body: true when the device did not set VIRTQ_USED_F_NO_NOTIFY,
// i.e. it still wants to be notified about newly supplied buffers.
VERIFY(m_lock.is_locked());
auto device_flags = m_device->flags;
return !(device_flags & VIRTQ_USED_F_NO_NOTIFY);
}
// Appends one physical buffer to the chain being built, taking a descriptor
// slot from the queue's free list.
//
// buffer_start/buffer_length describe the physical memory handed to the device;
// buffer_type says whether the device may write to it. Returns false (leaving
// the chain unchanged) when the queue has no free descriptor slots.
// Must be called with the queue lock held.
bool VirtIOQueueChain::add_buffer_to_chain(PhysicalAddress buffer_start, size_t buffer_length, BufferType buffer_type)
{
    VERIFY(m_queue.lock().is_locked());
    // Ensure that no readable pages will be inserted after a writable one, as required by the VirtIO spec
    VERIFY(buffer_type == BufferType::DeviceWritable || !m_chain_has_writable_pages);
    m_chain_has_writable_pages |= (buffer_type == BufferType::DeviceWritable);
    // Take a free slot from the queue
    auto descriptor_index = m_queue.take_free_slot();
    if (!descriptor_index.has_value())
        return false;
    if (!m_start_of_chain_index.has_value()) {
        // Set start of chain if it hasn't been set
        m_start_of_chain_index = descriptor_index.value();
    } else {
        // Link from previous element in VirtIOQueueChain
        m_queue.m_descriptors[m_end_of_chain_index.value()].flags |= VIRTQ_DESC_F_NEXT;
        m_queue.m_descriptors[m_end_of_chain_index.value()].next = descriptor_index.value();
    }
    // Update end of chain
    m_end_of_chain_index = descriptor_index.value();
    ++m_chain_length;
    // Populate buffer info.
    // FIX: the descriptor's length field is a u32 and the static_cast below would
    // silently truncate; the previous check against NumericLimits<size_t>::max()
    // was a tautology (buffer_length is itself a size_t) and could never fire.
    VERIFY(buffer_length <= NumericLimits<u32>::max());
    m_queue.m_descriptors[descriptor_index.value()].address = static_cast<u64>(buffer_start.get());
    m_queue.m_descriptors[descriptor_index.value()].flags = static_cast<u16>(buffer_type);
    m_queue.m_descriptors[descriptor_index.value()].length = static_cast<u32>(buffer_length);
    return true;
}
// Publishes the chain built so far to the device: writes the chain's head
// descriptor into the next driver-ring slot, then (after a barrier) exposes it
// by advancing the device-visible driver index. Must be called with the queue
// lock held and a non-empty chain; afterwards this object no longer tracks the
// submitted descriptors.
void VirtIOQueueChain::submit_to_queue()
{
VERIFY(m_queue.lock().is_locked());
VERIFY(m_start_of_chain_index.has_value());
// m_driver_index_shadow mirrors m_driver->index so the next ring slot can be
// computed before the device-visible index is bumped.
auto next_index = m_queue.m_driver_index_shadow % m_queue.m_queue_size;
m_queue.m_driver->rings[next_index] = m_start_of_chain_index.value();
m_queue.m_driver_index_shadow++;
// Barrier: the ring entry must be visible before the index update publishes it.
full_memory_barrier();
m_queue.m_driver->index = m_queue.m_driver_index_shadow;
// Reset internal chain state
m_start_of_chain_index = m_end_of_chain_index = {};
m_chain_has_writable_pages = false;
m_chain_length = 0;
}
// Returns every descriptor slot held by this chain to the queue's free pool and
// resets the chain-building state so the object can be reused. Safe to call on
// an empty chain (does nothing). Must be called with the queue lock held.
void VirtIOQueueChain::release_buffer_slots_to_queue()
{
    VERIFY(m_queue.lock().is_locked());
    if (!m_start_of_chain_index.has_value())
        return; // nothing was accumulated in this chain
    // Splice the whole stored chain back onto the queue's free list.
    m_queue.reclaim_buffer_chain(m_start_of_chain_index.value(), m_end_of_chain_index.value(), m_chain_length);
    // Clear the internal chain state.
    m_start_of_chain_index = m_end_of_chain_index = {};
    m_chain_has_writable_pages = false;
    m_chain_length = 0;
}
}