
Kernel: Move VirtIO code into the Bus source folder

The VirtIO code handles functionality related to the VirtIO bus, so it
really should be in the Bus folder.
Liav A authored on 2021-08-13 05:21:19 +03:00, committed by Gunnar Beutner
commit 18eb262157 (parent f641cc6470)
14 changed files with 23 additions and 23 deletions

Kernel/Bus/VirtIO/VirtIO.cpp Normal file
@@ -0,0 +1,388 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/VirtIO.h>
#include <Kernel/Bus/VirtIO/VirtIOConsole.h>
#include <Kernel/Bus/VirtIO/VirtIORNG.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Sections.h>
namespace Kernel {
UNMAP_AFTER_INIT void VirtIO::detect()
{
if (kernel_command_line().disable_virtio())
return;
PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
if (address.is_null() || id.is_null())
return;
// TODO: We should also check that the device_id is between 0x1000 and 0x107F, inclusive
if (id.vendor_id != PCI::VendorID::VirtIO)
return;
switch (id.device_id) {
case PCI::DeviceID::VirtIOConsole: {
[[maybe_unused]] auto& unused = adopt_ref(*new VirtIOConsole(address)).leak_ref();
break;
}
case PCI::DeviceID::VirtIOEntropy: {
[[maybe_unused]] auto& unused = adopt_ref(*new VirtIORNG(address)).leak_ref();
break;
}
case PCI::DeviceID::VirtIOGPU: {
// This should have been initialized by the graphics subsystem
break;
}
default:
dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", id.device_id);
break;
}
});
}
UNMAP_AFTER_INIT VirtIODevice::VirtIODevice(PCI::Address address, String class_name)
: PCI::Device(address, PCI::get_interrupt_line(address))
, m_class_name(move(class_name))
, m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1))
{
dbgln("{}: Found @ {}", m_class_name, pci_address());
enable_bus_mastering(pci_address());
PCI::enable_interrupt_line(pci_address());
enable_irq();
auto capabilities = PCI::get_physical_id(address).capabilities();
for (auto& capability : capabilities) {
if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
// We have a virtio_pci_cap
auto cfg = make<Configuration>();
auto raw_config_type = capability.read8(0x3);
if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
dbgln("{}: Unknown capability configuration type: {}", m_class_name, raw_config_type);
return;
}
cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
auto cap_length = capability.read8(0x2);
if (cap_length < 0x10) {
dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
break;
}
cfg->bar = capability.read8(0x4);
if (cfg->bar > 0x5) {
dbgln("{}: Unexpected capability bar value: {}", m_class_name, cfg->bar);
break;
}
cfg->offset = capability.read32(0x8);
cfg->length = capability.read32(0xc);
dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
if (cfg->cfg_type == ConfigurationType::Common)
m_use_mmio = true;
else if (cfg->cfg_type == ConfigurationType::Notify)
m_notify_multiplier = capability.read32(0x10);
m_configs.append(move(cfg));
}
}
if (m_use_mmio) {
m_common_cfg = get_config(ConfigurationType::Common, 0);
m_notify_cfg = get_config(ConfigurationType::Notify, 0);
m_isr_cfg = get_config(ConfigurationType::ISR, 0);
}
reset_device();
set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
set_status_bit(DEVICE_STATUS_DRIVER);
}
VirtIODevice::~VirtIODevice()
{
}
auto VirtIODevice::mapping_for_bar(u8 bar) -> MappedMMIO&
{
VERIFY(m_use_mmio);
auto& mapping = m_mmio[bar];
if (!mapping.base) {
mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
if (!mapping.base)
dbgln("{}: Failed to map bar {}", m_class_name, bar);
}
return mapping;
}
void VirtIODevice::notify_queue(u16 queue_index)
{
dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", m_class_name, queue_index);
if (!m_notify_cfg)
out<u16>(REG_QUEUE_NOTIFY, queue_index);
else
config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
}
u8 VirtIODevice::config_read8(const Configuration& config, u32 offset)
{
return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
}
u16 VirtIODevice::config_read16(const Configuration& config, u32 offset)
{
return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
}
u32 VirtIODevice::config_read32(const Configuration& config, u32 offset)
{
return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
}
void VirtIODevice::config_write8(const Configuration& config, u32 offset, u8 value)
{
mapping_for_bar(config.bar).write(config.offset + offset, value);
}
void VirtIODevice::config_write16(const Configuration& config, u32 offset, u16 value)
{
mapping_for_bar(config.bar).write(config.offset + offset, value);
}
void VirtIODevice::config_write32(const Configuration& config, u32 offset, u32 value)
{
mapping_for_bar(config.bar).write(config.offset + offset, value);
}
void VirtIODevice::config_write64(const Configuration& config, u32 offset, u64 value)
{
mapping_for_bar(config.bar).write(config.offset + offset, value);
}
u8 VirtIODevice::read_status_bits()
{
if (!m_common_cfg)
return in<u8>(REG_DEVICE_STATUS);
return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}
void VirtIODevice::mask_status_bits(u8 status_mask)
{
m_status &= status_mask;
if (!m_common_cfg)
out<u8>(REG_DEVICE_STATUS, m_status);
else
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
void VirtIODevice::set_status_bit(u8 status_bit)
{
m_status |= status_bit;
if (!m_common_cfg)
out<u8>(REG_DEVICE_STATUS, m_status);
else
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
u64 VirtIODevice::get_device_features()
{
if (!m_common_cfg)
return in<u32>(REG_DEVICE_FEATURES);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
return upper_bits | lower_bits;
}
bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_features)
{
VERIFY(!m_did_accept_features);
m_did_accept_features = true;
if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
}
if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", m_class_name);
accepted_features &= ~(VIRTIO_F_RING_PACKED);
}
// TODO: implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
// accepted_features |= VIRTIO_F_INDIRECT_DESC;
}
if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
accepted_features |= VIRTIO_F_IN_ORDER;
}
dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", m_class_name, device_features);
dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", m_class_name, accepted_features);
if (!m_common_cfg) {
out<u32>(REG_GUEST_FEATURES, accepted_features);
} else {
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
}
set_status_bit(DEVICE_STATUS_FEATURES_OK);
m_status = read_status_bits();
if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
set_status_bit(DEVICE_STATUS_FAILED);
dbgln("{}: Features not accepted by host!", m_class_name);
return false;
}
m_accepted_features = accepted_features;
dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", m_class_name);
return true;
}
void VirtIODevice::reset_device()
{
dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
if (!m_common_cfg) {
mask_status_bits(0);
while (read_status_bits() != 0) {
// TODO: delay a bit?
}
return;
}
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
// TODO: delay a bit?
}
}
bool VirtIODevice::setup_queue(u16 queue_index)
{
if (!m_common_cfg)
return false;
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
if (queue_size == 0) {
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
return true;
}
u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
auto queue = make<VirtIOQueue>(queue_size, queue_notify_offset);
if (queue->is_null())
return false;
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", m_class_name, queue_index, queue_size);
m_queues.append(move(queue));
return true;
}
bool VirtIODevice::activate_queue(u16 queue_index)
{
if (!m_common_cfg)
return false;
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", m_class_name, queue_index);
return true;
}
bool VirtIODevice::setup_queues(u16 requested_queue_count)
{
VERIFY(!m_did_setup_queues);
m_did_setup_queues = true;
if (m_common_cfg) {
auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
if (requested_queue_count == 0) {
m_queue_count = maximum_queue_count;
} else if (requested_queue_count > maximum_queue_count) {
dbgln("{}: {} queues requested but only {} available!", m_class_name, m_queue_count, maximum_queue_count);
return false;
} else {
m_queue_count = requested_queue_count;
}
} else {
m_queue_count = requested_queue_count;
dbgln("{}: device's available queue count could not be determined!", m_class_name);
}
dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", m_class_name, m_queue_count);
for (u16 i = 0; i < m_queue_count; i++) {
if (!setup_queue(i))
return false;
}
for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all other queues have been configured
if (!activate_queue(i))
return false;
}
return true;
}
void VirtIODevice::finish_init()
{
VERIFY(m_did_accept_features); // ensure features were negotiated
VERIFY(m_did_setup_queues); // ensure queues were set-up
VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
set_status_bit(DEVICE_STATUS_DRIVER_OK);
dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
}
u8 VirtIODevice::isr_status()
{
if (!m_isr_cfg)
return in<u8>(REG_ISR_STATUS);
return config_read8(*m_isr_cfg, 0);
}
bool VirtIODevice::handle_irq(const RegisterState&)
{
u8 isr_type = isr_status();
if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", m_class_name, isr_type);
return false;
}
if (isr_type & DEVICE_CONFIG_INTERRUPT) {
dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Device config interrupt!", m_class_name);
if (!handle_device_config_change()) {
set_status_bit(DEVICE_STATUS_FAILED);
dbgln("{}: Failed to handle device config change!", m_class_name);
}
}
if (isr_type & QUEUE_INTERRUPT) {
dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Queue interrupt!", m_class_name);
for (size_t i = 0; i < m_queues.size(); i++) {
if (get_queue(i).new_data_available()) {
handle_queue_update(i);
return true;
}
}
dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", m_class_name);
}
return true;
}
void VirtIODevice::supply_chain_and_notify(u16 queue_index, VirtIOQueueChain& chain)
{
auto& queue = get_queue(queue_index);
VERIFY(&chain.queue() == &queue);
VERIFY(queue.lock().is_locked());
chain.submit_to_queue();
if (queue.should_notify())
notify_queue(queue_index);
}
}

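For orientation, the fixed offsets read in the constructor above (0x2, 0x3, 0x4, 0x8, 0xc, 0x10) correspond to the virtio_pci_cap layout from the VirtIO 1.0 specification. A sketch of that layout, not part of this commit:

struct [[gnu::packed]] virtio_pci_cap {
    u8 cap_vndr;   // 0x0: capability ID (PCI_CAPABILITY_VENDOR_SPECIFIC)
    u8 cap_next;   // 0x1: link to the next capability
    u8 cap_len;    // 0x2: length of this structure; the code above expects >= 0x10
    u8 cfg_type;   // 0x3: one of the ConfigurationType values
    u8 bar;        // 0x4: which BAR (0..5) holds the configuration window
    u8 padding[3]; // 0x5-0x7
    u32 offset;    // 0x8: offset of the window within the BAR
    u32 length;    // 0xc: length of the window
};
// For VIRTIO_PCI_CAP_NOTIFY_CFG the capability is followed by
// u32 notify_off_multiplier at 0x10, which is read into m_notify_multiplier.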
Kernel/Bus/VirtIO/VirtIO.h Normal file
@@ -0,0 +1,244 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/NonnullOwnPtrVector.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Bus/VirtIO/VirtIOQueue.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Memory/MemoryManager.h>
namespace Kernel {
#define REG_DEVICE_FEATURES 0x0
#define REG_GUEST_FEATURES 0x4
#define REG_QUEUE_ADDRESS 0x8
#define REG_QUEUE_SIZE 0xc
#define REG_QUEUE_SELECT 0xe
#define REG_QUEUE_NOTIFY 0x10
#define REG_DEVICE_STATUS 0x12
#define REG_ISR_STATUS 0x13
#define DEVICE_STATUS_ACKNOWLEDGE (1 << 0)
#define DEVICE_STATUS_DRIVER (1 << 1)
#define DEVICE_STATUS_DRIVER_OK (1 << 2)
#define DEVICE_STATUS_FEATURES_OK (1 << 3)
#define DEVICE_STATUS_DEVICE_NEEDS_RESET (1 << 6)
#define DEVICE_STATUS_FAILED (1 << 7)
#define VIRTIO_F_INDIRECT_DESC ((u64)1 << 28)
#define VIRTIO_F_VERSION_1 ((u64)1 << 32)
#define VIRTIO_F_RING_PACKED ((u64)1 << 34)
#define VIRTIO_F_IN_ORDER ((u64)1 << 35)
#define VIRTIO_PCI_CAP_COMMON_CFG 1
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
#define VIRTIO_PCI_CAP_ISR_CFG 3
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
#define VIRTIO_PCI_CAP_PCI_CFG 5
// virtio_pci_common_cfg
#define COMMON_CFG_DEVICE_FEATURE_SELECT 0x0
#define COMMON_CFG_DEVICE_FEATURE 0x4
#define COMMON_CFG_DRIVER_FEATURE_SELECT 0x8
#define COMMON_CFG_DRIVER_FEATURE 0xc
#define COMMON_CFG_MSIX_CONFIG 0x10
#define COMMON_CFG_NUM_QUEUES 0x12
#define COMMON_CFG_DEVICE_STATUS 0x14
#define COMMON_CFG_CONFIG_GENERATION 0x15
#define COMMON_CFG_QUEUE_SELECT 0x16
#define COMMON_CFG_QUEUE_SIZE 0x18
#define COMMON_CFG_QUEUE_MSIX_VECTOR 0x1a
#define COMMON_CFG_QUEUE_ENABLE 0x1c
#define COMMON_CFG_QUEUE_NOTIFY_OFF 0x1e
#define COMMON_CFG_QUEUE_DESC 0x20
#define COMMON_CFG_QUEUE_DRIVER 0x28
#define COMMON_CFG_QUEUE_DEVICE 0x30
#define QUEUE_INTERRUPT 0x1
#define DEVICE_CONFIG_INTERRUPT 0x2
enum class ConfigurationType : u8 {
Common = 1,
Notify = 2,
ISR = 3,
Device = 4,
PCI = 5
};
struct Configuration {
ConfigurationType cfg_type;
u8 bar;
u32 offset;
u32 length;
};
class VirtIO {
public:
static void detect();
};
class VirtIODevice : public PCI::Device {
public:
VirtIODevice(PCI::Address, String);
virtual ~VirtIODevice() override;
protected:
const String m_class_name;
struct MappedMMIO {
OwnPtr<Memory::Region> base;
size_t size { 0 };
template<typename T>
T read(u32 offset) const
{
if (!base)
return 0;
VERIFY(size >= sizeof(T));
VERIFY(offset + sizeof(T) <= size);
return *(volatile T*)(base->vaddr().offset(offset).get());
}
template<typename T>
void write(u32 offset, T value)
{
if (!base)
return;
VERIFY(size >= sizeof(T));
VERIFY(offset + sizeof(T) <= size);
*(volatile T*)(base->vaddr().offset(offset).get()) = value;
}
};
const Configuration* get_config(ConfigurationType cfg_type, u32 index = 0) const
{
for (auto& cfg : m_configs) {
if (cfg.cfg_type != cfg_type)
continue;
if (index > 0) {
index--;
continue;
}
return &cfg;
}
return nullptr;
}
template<typename F>
void read_config_atomic(F f)
{
if (m_common_cfg) {
u8 generation_before, generation_after;
do {
generation_before = config_read8(*m_common_cfg, 0x15);
f();
generation_after = config_read8(*m_common_cfg, 0x15);
} while (generation_before != generation_after);
} else {
f();
}
}
u8 config_read8(const Configuration&, u32);
u16 config_read16(const Configuration&, u32);
u32 config_read32(const Configuration&, u32);
void config_write8(const Configuration&, u32, u8);
void config_write16(const Configuration&, u32, u16);
void config_write32(const Configuration&, u32, u32);
void config_write64(const Configuration&, u32, u64);
auto mapping_for_bar(u8) -> MappedMMIO&;
u8 read_status_bits();
void mask_status_bits(u8 status_mask);
void set_status_bit(u8);
u64 get_device_features();
bool setup_queues(u16 requested_queue_count = 0);
void finish_init();
VirtIOQueue& get_queue(u16 queue_index)
{
VERIFY(queue_index < m_queue_count);
return m_queues[queue_index];
}
const VirtIOQueue& get_queue(u16 queue_index) const
{
VERIFY(queue_index < m_queue_count);
return m_queues[queue_index];
}
template<typename F>
bool negotiate_features(F f)
{
u64 device_features = get_device_features();
u64 accept_features = f(device_features);
VERIFY(!(~device_features & accept_features));
return accept_device_features(device_features, accept_features);
}
static bool is_feature_set(u64 feature_set, u64 test_feature)
{
// features can have more than one bit
return (feature_set & test_feature) == test_feature;
}
bool is_feature_accepted(u64 feature) const
{
VERIFY(m_did_accept_features);
return is_feature_set(m_accepted_features, feature);
}
void supply_chain_and_notify(u16 queue_index, VirtIOQueueChain& chain);
virtual bool handle_device_config_change() = 0;
virtual void handle_queue_update(u16 queue_index) = 0;
private:
template<typename T>
void out(u16 address, T value)
{
m_io_base.offset(address).out(value);
}
template<typename T>
T in(u16 address)
{
return m_io_base.offset(address).in<T>();
}
bool accept_device_features(u64 device_features, u64 accepted_features);
bool setup_queue(u16 queue_index);
bool activate_queue(u16 queue_index);
void notify_queue(u16 queue_index);
void reset_device();
u8 isr_status();
virtual bool handle_irq(const RegisterState&) override;
NonnullOwnPtrVector<VirtIOQueue> m_queues;
NonnullOwnPtrVector<Configuration> m_configs;
const Configuration* m_common_cfg { nullptr }; // Cached due to high usage
const Configuration* m_notify_cfg { nullptr }; // Cached due to high usage
const Configuration* m_isr_cfg { nullptr }; // Cached due to high usage
IOAddress m_io_base;
MappedMMIO m_mmio[6];
u16 m_queue_count { 0 };
bool m_use_mmio { false };
u8 m_status { 0 };
u64 m_accepted_features { 0 };
bool m_did_accept_features { false };
bool m_did_setup_queues { false };
u32 m_notify_multiplier { 0 };
};
}

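Taken together, the protected API above gives subclasses a three-step bring-up: negotiate features, set up queues, then flip DRIVER_OK. A minimal sketch of a driver built on it (VirtIOExample is a hypothetical name, not part of this commit):

class VirtIOExample final : public VirtIODevice {
public:
    VirtIOExample(PCI::Address address)
        : VirtIODevice(address, "VirtIOExample") // base constructor resets the device and sets ACKNOWLEDGE | DRIVER
    {
        bool success = negotiate_features([&](u64) {
            return 0; // accept no optional features
        });
        if (success)
            success = setup_queues(1); // a single virtqueue for this example
        if (success)
            finish_init(); // sets DRIVER_OK; the device is live from here on
    }

private:
    virtual bool handle_device_config_change() override { return true; }
    virtual void handle_queue_update(u16) override { }
};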
Kernel/Bus/VirtIO/VirtIOConsole.cpp Normal file
@@ -0,0 +1,218 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
* Copyright (c) 2021, Kyle Pereira <hey@xylepereira.me>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/VirtIOConsole.h>
#include <Kernel/Sections.h>
namespace Kernel {
unsigned VirtIOConsole::next_device_id = 0;
UNMAP_AFTER_INIT VirtIOConsole::VirtIOConsole(PCI::Address address)
: VirtIODevice(address, "VirtIOConsole")
, m_device_id(next_device_id++)
{
if (auto cfg = get_config(ConfigurationType::Device)) {
bool success = negotiate_features([&](u64 supported_features) {
u64 negotiated = 0;
if (is_feature_set(supported_features, VIRTIO_CONSOLE_F_SIZE))
dbgln("VirtIOConsole: Console size is not yet supported!");
if (is_feature_set(supported_features, VIRTIO_CONSOLE_F_MULTIPORT))
negotiated |= VIRTIO_CONSOLE_F_MULTIPORT;
return negotiated;
});
if (success) {
u32 max_nr_ports = 0;
u16 cols = 0, rows = 0;
read_config_atomic([&]() {
if (is_feature_accepted(VIRTIO_CONSOLE_F_SIZE)) {
cols = config_read16(*cfg, 0x0);
rows = config_read16(*cfg, 0x2);
}
if (is_feature_accepted(VIRTIO_CONSOLE_F_MULTIPORT)) {
max_nr_ports = config_read32(*cfg, 0x4);
m_ports.resize(max_nr_ports);
}
});
dbgln("VirtIOConsole: cols: {}, rows: {}, max nr ports {}", cols, rows, max_nr_ports);
// Base receiveq/transmitq for port0, plus two control queues and two queues per port when multiport is negotiated
success = setup_queues(max_nr_ports > 0 ? 2 + 2 * max_nr_ports : 2);
}
if (success) {
finish_init();
if (is_feature_accepted(VIRTIO_CONSOLE_F_MULTIPORT))
setup_multiport();
else
m_ports.append(new VirtIOConsolePort(0u, *this));
}
}
}
bool VirtIOConsole::handle_device_config_change()
{
dbgln("VirtIOConsole: Handle device config change");
return true;
}
void VirtIOConsole::handle_queue_update(u16 queue_index)
{
dbgln_if(VIRTIO_DEBUG, "VirtIOConsole: Handle queue update {}", queue_index);
if (queue_index == CONTROL_RECEIVEQ) {
ScopedSpinLock ringbuffer_lock(m_control_receive_buffer->lock());
auto& queue = get_queue(CONTROL_RECEIVEQ);
ScopedSpinLock queue_lock(queue.lock());
size_t used;
VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
while (!popped_chain.is_empty()) {
popped_chain.for_each([&](auto addr, auto) {
auto offset = addr.as_ptr() - m_control_receive_buffer->start_of_region().as_ptr();
auto* message = reinterpret_cast<ControlMessage*>(m_control_receive_buffer->vaddr().offset(offset).as_ptr());
process_control_message(*message);
});
supply_chain_and_notify(CONTROL_RECEIVEQ, popped_chain);
popped_chain = queue.pop_used_buffer_chain(used);
}
} else if (queue_index == CONTROL_TRANSMITQ) {
ScopedSpinLock ringbuffer_lock(m_control_transmit_buffer->lock());
auto& queue = get_queue(CONTROL_TRANSMITQ);
ScopedSpinLock queue_lock(queue.lock());
size_t used;
VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
auto number_of_messages = 0;
do {
popped_chain.for_each([this](PhysicalAddress address, size_t length) {
m_control_transmit_buffer->reclaim_space(address, length);
});
popped_chain.release_buffer_slots_to_queue();
popped_chain = queue.pop_used_buffer_chain(used);
number_of_messages++;
} while (!popped_chain.is_empty());
m_control_wait_queue.wake_n(number_of_messages);
} else {
u32 port_index = queue_index < 2 ? 0 : (queue_index - 2) / 2;
if (port_index >= m_ports.size() || !m_ports.at(port_index)) {
dbgln("Invalid queue_index {}", queue_index);
return;
}
m_ports.at(port_index)->handle_queue_update({}, queue_index);
}
}
void VirtIOConsole::setup_multiport()
{
m_control_receive_buffer = make<Memory::RingBuffer>("VirtIOConsole control receive queue", CONTROL_BUFFER_SIZE);
m_control_transmit_buffer = make<Memory::RingBuffer>("VirtIOConsole control transmit queue", CONTROL_BUFFER_SIZE);
auto& queue = get_queue(CONTROL_RECEIVEQ);
ScopedSpinLock queue_lock(queue.lock());
VirtIOQueueChain chain(queue);
auto offset = 0ul;
while (offset < CONTROL_BUFFER_SIZE) {
auto buffer_start = m_control_receive_buffer->start_of_region().offset(offset);
auto did_add_buffer = chain.add_buffer_to_chain(buffer_start, CONTROL_MESSAGE_SIZE, BufferType::DeviceWritable);
VERIFY(did_add_buffer);
offset += CONTROL_MESSAGE_SIZE;
supply_chain_and_notify(CONTROL_RECEIVEQ, chain);
}
ControlMessage ready_event {
.id = 0, // Unused
.event = (u16)ControlEvent::DeviceReady,
.value = (u16)ControlMessage::Status::Success
};
write_control_message(ready_event);
}
void VirtIOConsole::process_control_message(ControlMessage message)
{
switch (message.event) {
case (u16)ControlEvent::DeviceAdd: {
u32 id = message.id;
if (id >= m_ports.size()) {
dbgln("Device provided an invalid port number {}. max_nr_ports: {}", id, m_ports.size());
return;
} else if (!m_ports.at(id).is_null()) {
dbgln("Device tried to add port {} which was already added!", id);
return;
}
m_ports.at(id) = new VirtIOConsolePort(id, *this);
ControlMessage ready_event {
.id = static_cast<u32>(id),
.event = (u16)ControlEvent::PortReady,
.value = (u16)ControlMessage::Status::Success
};
write_control_message(ready_event);
break;
}
case (u16)ControlEvent::ConsolePort:
case (u16)ControlEvent::PortOpen: {
if (message.id >= m_ports.size()) {
dbgln("Device provided an invalid port number {}. max_nr_ports: {}", message.id, m_ports.size());
return;
} else if (m_ports.at(message.id).is_null()) {
dbgln("Device tried to open port {} which was not added!", message.id);
return;
}
if (message.value == (u16)ControlMessage::PortStatus::Open) {
auto is_open = m_ports.at(message.id)->is_open();
if (!is_open) {
m_ports.at(message.id)->set_open({}, true);
send_open_control_message(message.id, true);
}
} else if (message.value == (u16)ControlMessage::PortStatus::Close) {
m_ports.at(message.id)->set_open({}, false);
} else {
dbgln("Device specified invalid value {}. Must be 0 or 1.", message.value);
}
break;
}
default:
dbgln("Unhandled message event {}!", message.event);
}
}
void VirtIOConsole::write_control_message(ControlMessage message)
{
ScopedSpinLock ringbuffer_lock(m_control_transmit_buffer->lock());
PhysicalAddress start_of_chunk;
size_t length_of_chunk;
auto data = UserOrKernelBuffer::for_kernel_buffer((u8*)&message);
while (!m_control_transmit_buffer->copy_data_in(data, 0, sizeof(message), start_of_chunk, length_of_chunk)) {
ringbuffer_lock.unlock();
m_control_wait_queue.wait_forever();
ringbuffer_lock.lock();
}
auto& queue = get_queue(CONTROL_TRANSMITQ);
ScopedSpinLock queue_lock(queue.lock());
VirtIOQueueChain chain(queue);
bool did_add_buffer = chain.add_buffer_to_chain(start_of_chunk, length_of_chunk, BufferType::DeviceReadable);
VERIFY(did_add_buffer);
supply_chain_and_notify(CONTROL_TRANSMITQ, chain);
}
void VirtIOConsole::send_open_control_message(unsigned port_number, bool open)
{
ControlMessage port_open {
.id = static_cast<u32>(port_number),
.event = (u16)ControlEvent::PortOpen,
.value = open
};
write_control_message(port_open);
}
}

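The port-to-queue arithmetic in handle_queue_update() above follows the multiport queue layout from the VirtIO spec. Written out as a sketch (not part of this commit), with the control queues filtered first as the code does:

// queue 0/1: receiveq/transmitq for port 0
// queue 2/3: control receiveq/transmitq
// queue 2n+2 / 2n+3: receiveq/transmitq for port n >= 1
static u32 port_index_for_queue(u16 queue_index)
{
    return queue_index < 2 ? 0 : (queue_index - 2) / 2;
}
// e.g. queue 1 -> port 0, queue 4 -> port 1, queue 7 -> port 2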
Kernel/Bus/VirtIO/VirtIOConsole.h Normal file
@@ -0,0 +1,78 @@
/*
* Copyright (c) 2021, Kyle Pereira <hey@xylepereira.me>
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/VirtIO/VirtIO.h>
#include <Kernel/Bus/VirtIO/VirtIOConsolePort.h>
#include <Kernel/Memory/RingBuffer.h>
namespace Kernel {
class VirtIOConsole
: public VirtIODevice
, public RefCounted<VirtIOConsole> {
friend VirtIOConsolePort;
public:
VirtIOConsole(PCI::Address);
virtual ~VirtIOConsole() override = default;
virtual StringView purpose() const override { return "VirtIOConsole"; }
unsigned device_id() const
{
return m_device_id;
}
private:
enum class ControlEvent : u16 {
DeviceReady = 0,
DeviceAdd = 1,
PortReady = 3,
ConsolePort = 4,
PortOpen = 6,
};
struct [[gnu::packed]] ControlMessage {
u32 id;
u16 event;
u16 value;
enum class Status : u16 {
Success = 1,
Failure = 0
};
enum class PortStatus : u16 {
Open = 1,
Close = 0
};
};
constexpr static u16 CONTROL_RECEIVEQ = 2;
constexpr static u16 CONTROL_TRANSMITQ = 3;
constexpr static size_t CONTROL_MESSAGE_SIZE = sizeof(ControlMessage);
constexpr static size_t CONTROL_BUFFER_SIZE = CONTROL_MESSAGE_SIZE * 32;
virtual bool handle_device_config_change() override;
virtual void handle_queue_update(u16 queue_index) override;
Vector<RefPtr<VirtIOConsolePort>> m_ports;
void setup_multiport();
void process_control_message(ControlMessage message);
void write_control_message(ControlMessage message);
void send_open_control_message(unsigned port_number, bool open);
unsigned m_device_id;
OwnPtr<Memory::RingBuffer> m_control_transmit_buffer;
OwnPtr<Memory::RingBuffer> m_control_receive_buffer;
WaitQueue m_control_wait_queue;
static unsigned next_device_id;
};
}

Kernel/Bus/VirtIO/VirtIOConsolePort.cpp Normal file
@@ -0,0 +1,167 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
* Copyright (c) 2021, Kyle Pereira <hey@xylepereira.me>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/VirtIOConsole.h>
#include <Kernel/Bus/VirtIO/VirtIOConsolePort.h>
namespace Kernel {
unsigned VirtIOConsolePort::next_device_id = 0;
VirtIOConsolePort::VirtIOConsolePort(unsigned port, VirtIOConsole& console)
: CharacterDevice(229, next_device_id++)
, m_console(console)
, m_port(port)
{
m_receive_buffer = make<Memory::RingBuffer>("VirtIOConsolePort Receive", RINGBUFFER_SIZE);
m_transmit_buffer = make<Memory::RingBuffer>("VirtIOConsolePort Transmit", RINGBUFFER_SIZE);
m_receive_queue = m_port == 0 ? 0 : m_port * 2 + 2;
m_transmit_queue = m_port == 0 ? 1 : m_port * 2 + 3;
init_receive_buffer();
}
void VirtIOConsolePort::init_receive_buffer()
{
auto& queue = m_console.get_queue(m_receive_queue);
ScopedSpinLock queue_lock(queue.lock());
VirtIOQueueChain chain(queue);
auto buffer_start = m_receive_buffer->start_of_region();
auto did_add_buffer = chain.add_buffer_to_chain(buffer_start, RINGBUFFER_SIZE, BufferType::DeviceWritable);
VERIFY(did_add_buffer);
m_console.supply_chain_and_notify(m_receive_queue, chain);
}
void VirtIOConsolePort::handle_queue_update(Badge<VirtIOConsole>, u16 queue_index)
{
dbgln_if(VIRTIO_DEBUG, "VirtIOConsolePort: Handle queue update for port {}", m_port);
VERIFY(queue_index == m_transmit_queue || queue_index == m_receive_queue);
if (queue_index == m_receive_queue) {
auto& queue = m_console.get_queue(m_receive_queue);
ScopedSpinLock queue_lock(queue.lock());
size_t used;
VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
ScopedSpinLock ringbuffer_lock(m_receive_buffer->lock());
auto used_space = m_receive_buffer->reserve_space(used).value();
auto remaining_space = m_receive_buffer->bytes_till_end();
// Our algorithm always has only one buffer in the queue.
VERIFY(popped_chain.length() == 1);
VERIFY(!queue.new_data_available());
popped_chain.release_buffer_slots_to_queue();
VirtIOQueueChain new_chain(queue);
if (remaining_space != 0) {
new_chain.add_buffer_to_chain(used_space.offset(used), remaining_space, BufferType::DeviceWritable);
m_console.supply_chain_and_notify(m_receive_queue, new_chain);
} else {
m_receive_buffer_exhausted = true;
}
evaluate_block_conditions();
} else {
ScopedSpinLock ringbuffer_lock(m_transmit_buffer->lock());
auto& queue = m_console.get_queue(m_transmit_queue);
ScopedSpinLock queue_lock(queue.lock());
size_t used;
VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
do {
popped_chain.for_each([this](PhysicalAddress address, size_t length) {
m_transmit_buffer->reclaim_space(address, length);
});
popped_chain.release_buffer_slots_to_queue();
popped_chain = queue.pop_used_buffer_chain(used);
} while (!popped_chain.is_empty());
// Unblock any IO tasks that were blocked because can_write() returned false
evaluate_block_conditions();
}
}
bool VirtIOConsolePort::can_read(const FileDescription&, size_t) const
{
return m_receive_buffer->used_bytes() > 0;
}
KResultOr<size_t> VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKernelBuffer& buffer, size_t size)
{
if (!size)
return 0;
ScopedSpinLock ringbuffer_lock(m_receive_buffer->lock());
if (!can_read(desc, size))
return EAGAIN;
auto bytes_copied = m_receive_buffer->copy_data_out(size, buffer).value();
m_receive_buffer->reclaim_space(m_receive_buffer->start_of_used(), bytes_copied);
if (m_receive_buffer_exhausted && m_receive_buffer->used_bytes() == 0) {
auto& queue = m_console.get_queue(m_receive_queue);
ScopedSpinLock queue_lock(queue.lock());
VirtIOQueueChain new_chain(queue);
new_chain.add_buffer_to_chain(m_receive_buffer->start_of_region(), RINGBUFFER_SIZE, BufferType::DeviceWritable);
m_console.supply_chain_and_notify(m_receive_queue, new_chain);
m_receive_buffer_exhausted = false;
}
return bytes_copied;
}
bool VirtIOConsolePort::can_write(const FileDescription&, size_t) const
{
return m_console.get_queue(m_transmit_queue).has_free_slots() && m_transmit_buffer->has_space();
}
KResultOr<size_t> VirtIOConsolePort::write(FileDescription& desc, u64, const UserOrKernelBuffer& data, size_t size)
{
if (!size)
return 0;
ScopedSpinLock ringbuffer_lock(m_transmit_buffer->lock());
auto& queue = m_console.get_queue(m_transmit_queue);
ScopedSpinLock queue_lock(queue.lock());
if (!can_write(desc, size))
return EAGAIN;
VirtIOQueueChain chain(queue);
size_t total_bytes_copied = 0;
do {
PhysicalAddress start_of_chunk;
size_t length_of_chunk;
if (!m_transmit_buffer->copy_data_in(data, total_bytes_copied, size - total_bytes_copied, start_of_chunk, length_of_chunk)) {
chain.release_buffer_slots_to_queue();
return EINVAL;
}
bool did_add_buffer = chain.add_buffer_to_chain(start_of_chunk, length_of_chunk, BufferType::DeviceReadable);
VERIFY(did_add_buffer);
total_bytes_copied += length_of_chunk;
} while (total_bytes_copied < size && can_write(desc, size));
m_console.supply_chain_and_notify(m_transmit_queue, chain);
return total_bytes_copied;
}
String VirtIOConsolePort::device_name() const
{
return String::formatted("hvc{}p{}", m_console.device_id(), m_port);
}
KResultOr<NonnullRefPtr<FileDescription>> VirtIOConsolePort::open(int options)
{
if (!m_open)
m_console.send_open_control_message(m_port, true);
return File::open(options);
}
}

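From userspace each port appears as a character device named by device_name() above, e.g. /dev/hvc0p0 for the first port of the first console. A minimal usage sketch (the device path assumes the usual /dev node layout; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main()
{
    // open() reaches VirtIOConsolePort::open(), which sends a PortOpen
    // control message to the host on first use.
    int fd = open("/dev/hvc0p0", O_RDWR);
    if (fd < 0)
        return 1;
    const char message[] = "hello from the guest\n";
    write(fd, message, sizeof(message) - 1); // lands in the transmit virtqueue
    char buffer[512];
    ssize_t nread = read(fd, buffer, sizeof(buffer)); // blocks until the host sends data
    if (nread > 0)
        fwrite(buffer, 1, nread, stdout);
    close(fd);
    return 0;
}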
Kernel/Bus/VirtIO/VirtIOConsolePort.h Normal file
@@ -0,0 +1,63 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
* Copyright (c) 2021, Kyle Pereira <hey@xylepereira.me>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/VirtIO/VirtIO.h>
#include <Kernel/Devices/CharacterDevice.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Memory/RingBuffer.h>
namespace Kernel {
class VirtIOConsole;
#define VIRTIO_CONSOLE_F_SIZE (1 << 0)
#define VIRTIO_CONSOLE_F_MULTIPORT (1 << 1)
#define VIRTIO_CONSOLE_F_EMERG_WRITE (1 << 2)
class VirtIOConsolePort
: public CharacterDevice {
public:
explicit VirtIOConsolePort(unsigned port, VirtIOConsole&);
void handle_queue_update(Badge<VirtIOConsole>, u16 queue_index);
void set_open(Badge<VirtIOConsole>, bool state) { m_open = state; }
bool is_open() const { return m_open; }
private:
constexpr static size_t RINGBUFFER_SIZE = 2 * PAGE_SIZE;
virtual StringView class_name() const override { return "VirtIOConsolePort"; }
virtual bool can_read(const FileDescription&, size_t) const override;
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override;
virtual bool can_write(const FileDescription&, size_t) const override;
virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override;
virtual KResultOr<NonnullRefPtr<FileDescription>> open(int options) override;
mode_t required_mode() const override { return 0666; }
String device_name() const override;
void init_receive_buffer();
static unsigned next_device_id;
u16 m_receive_queue {};
u16 m_transmit_queue {};
OwnPtr<Memory::RingBuffer> m_receive_buffer;
OwnPtr<Memory::RingBuffer> m_transmit_buffer;
VirtIOConsole& m_console;
unsigned m_port;
bool m_open { false };
Atomic<bool> m_receive_buffer_exhausted { false };
};
}

Kernel/Bus/VirtIO/VirtIOQueue.cpp Normal file
@@ -0,0 +1,199 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Atomic.h>
#include <Kernel/Bus/VirtIO/VirtIOQueue.h>
namespace Kernel {
VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
: m_queue_size(queue_size)
, m_notify_offset(notify_offset)
, m_free_buffers(queue_size)
{
size_t size_of_descriptors = sizeof(VirtIOQueueDescriptor) * queue_size;
size_t size_of_driver = sizeof(VirtIOQueueDriver) + queue_size * sizeof(u16);
size_t size_of_device = sizeof(VirtIOQueueDevice) + queue_size * sizeof(VirtIOQueueDeviceItem);
auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device);
if (queue_region_size <= PAGE_SIZE)
m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
else
m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite);
VERIFY(m_queue_region);
// TODO: ensure alignment!!!
u8* ptr = m_queue_region->vaddr().as_ptr();
memset(ptr, 0, m_queue_region->size());
m_descriptors = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDescriptor*>(ptr));
m_driver = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors));
m_device = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver));
for (auto i = 0; i + 1 < queue_size; i++) {
m_descriptors[i].next = i + 1; // link all of the descriptors in a line
}
enable_interrupts();
}
VirtIOQueue::~VirtIOQueue()
{
}
void VirtIOQueue::enable_interrupts()
{
ScopedSpinLock lock(m_lock);
m_driver->flags = 0;
}
void VirtIOQueue::disable_interrupts()
{
ScopedSpinLock lock(m_lock);
m_driver->flags = 1;
}
bool VirtIOQueue::new_data_available() const
{
const auto index = AK::atomic_load(&m_device->index, AK::MemoryOrder::memory_order_relaxed);
const auto used_tail = AK::atomic_load(&m_used_tail, AK::MemoryOrder::memory_order_relaxed);
return index != used_tail;
}
VirtIOQueueChain VirtIOQueue::pop_used_buffer_chain(size_t& used)
{
VERIFY(m_lock.is_locked());
if (!new_data_available()) {
used = 0;
return VirtIOQueueChain(*this);
}
full_memory_barrier();
// Determine used length
used = m_device->rings[m_used_tail % m_queue_size].length;
// Determine start, end and number of nodes in chain
auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
size_t length_of_chain = 1;
auto last_index = descriptor_index;
while (m_descriptors[last_index].flags & VIRTQ_DESC_F_NEXT) {
++length_of_chain;
last_index = m_descriptors[last_index].next;
}
// We are now done with this buffer chain
m_used_tail++;
return VirtIOQueueChain(*this, descriptor_index, last_index, length_of_chain);
}
void VirtIOQueue::discard_used_buffers()
{
VERIFY(m_lock.is_locked());
size_t used;
for (auto buffer = pop_used_buffer_chain(used); !buffer.is_empty(); buffer = pop_used_buffer_chain(used)) {
buffer.release_buffer_slots_to_queue();
}
}
void VirtIOQueue::reclaim_buffer_chain(u16 chain_start_index, u16 chain_end_index, size_t length_of_chain)
{
VERIFY(m_lock.is_locked());
m_descriptors[chain_end_index].next = m_free_head;
m_free_head = chain_start_index;
m_free_buffers += length_of_chain;
}
bool VirtIOQueue::has_free_slots() const
{
const auto free_buffers = AK::atomic_load(&m_free_buffers, AK::MemoryOrder::memory_order_relaxed);
return free_buffers > 0;
}
Optional<u16> VirtIOQueue::take_free_slot()
{
VERIFY(m_lock.is_locked());
if (has_free_slots()) {
auto descriptor_index = m_free_head;
m_free_head = m_descriptors[descriptor_index].next;
--m_free_buffers;
return descriptor_index;
} else {
return {};
}
}
bool VirtIOQueue::should_notify() const
{
VERIFY(m_lock.is_locked());
auto device_flags = m_device->flags;
return !(device_flags & VIRTQ_USED_F_NO_NOTIFY);
}
bool VirtIOQueueChain::add_buffer_to_chain(PhysicalAddress buffer_start, size_t buffer_length, BufferType buffer_type)
{
VERIFY(m_queue.lock().is_locked());
// Ensure that no readable pages will be inserted after a writable one, as required by the VirtIO spec
VERIFY(buffer_type == BufferType::DeviceWritable || !m_chain_has_writable_pages);
m_chain_has_writable_pages |= (buffer_type == BufferType::DeviceWritable);
// Take a free slot from the queue
auto descriptor_index = m_queue.take_free_slot();
if (!descriptor_index.has_value())
return false;
if (!m_start_of_chain_index.has_value()) {
// Set start of chain if it hasn't been set
m_start_of_chain_index = descriptor_index.value();
} else {
// Link from previous element in VirtIOQueueChain
m_queue.m_descriptors[m_end_of_chain_index.value()].flags |= VIRTQ_DESC_F_NEXT;
m_queue.m_descriptors[m_end_of_chain_index.value()].next = descriptor_index.value();
}
// Update end of chain
m_end_of_chain_index = descriptor_index.value();
++m_chain_length;
// Populate buffer info
VERIFY(buffer_length <= NumericLimits<u32>::max()); // the descriptor length field is only 32 bits wide
m_queue.m_descriptors[descriptor_index.value()].address = static_cast<u64>(buffer_start.get());
m_queue.m_descriptors[descriptor_index.value()].flags = static_cast<u16>(buffer_type);
m_queue.m_descriptors[descriptor_index.value()].length = static_cast<u32>(buffer_length);
return true;
}
void VirtIOQueueChain::submit_to_queue()
{
VERIFY(m_queue.lock().is_locked());
VERIFY(m_start_of_chain_index.has_value());
auto next_index = m_queue.m_driver_index_shadow % m_queue.m_queue_size;
m_queue.m_driver->rings[next_index] = m_start_of_chain_index.value();
m_queue.m_driver_index_shadow++;
full_memory_barrier();
m_queue.m_driver->index = m_queue.m_driver_index_shadow;
// Reset internal chain state
m_start_of_chain_index = m_end_of_chain_index = {};
m_chain_has_writable_pages = false;
m_chain_length = 0;
}
void VirtIOQueueChain::release_buffer_slots_to_queue()
{
VERIFY(m_queue.lock().is_locked());
if (m_start_of_chain_index.has_value()) {
// Add the currently stored chain back to the queue's free pool
m_queue.reclaim_buffer_chain(m_start_of_chain_index.value(), m_end_of_chain_index.value(), m_chain_length);
// Reset internal chain state
m_start_of_chain_index = m_end_of_chain_index = {};
m_chain_has_writable_pages = false;
m_chain_length = 0;
}
}
}

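To make the region sizing in the constructor above concrete, here are the numbers for a queue_size of 256, using the packed struct definitions from VirtIOQueue.h and assuming 4 KiB pages (a worked sketch, not part of this commit):

constexpr size_t queue_size = 256;
constexpr size_t size_of_descriptors = 16 * queue_size;         // sizeof(VirtIOQueueDescriptor) == 16 -> 4096 bytes
constexpr size_t size_of_driver = 4 + queue_size * sizeof(u16); // flags + index + 256 ring entries -> 516 bytes
constexpr size_t size_of_device = 4 + queue_size * 8;           // flags + index + 256 8-byte items -> 2052 bytes
// Total: 6664 bytes, rounded up to 8192, so a queue this size takes the
// two-page contiguous allocation path in the constructor.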
Kernel/Bus/VirtIO/VirtIOQueue.h Normal file
@@ -0,0 +1,186 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Locking/SpinLock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/ScatterGatherList.h>
namespace Kernel {
#define VIRTQ_DESC_F_NEXT 1
#define VIRTQ_DESC_F_INDIRECT 4
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
#define VIRTQ_USED_F_NO_NOTIFY 1
enum class BufferType {
DeviceReadable = 0,
DeviceWritable = 2
};
class VirtIODevice;
class VirtIOQueueChain;
class VirtIOQueue {
public:
VirtIOQueue(u16 queue_size, u16 notify_offset);
~VirtIOQueue();
bool is_null() const { return !m_queue_region; }
u16 notify_offset() const { return m_notify_offset; }
void enable_interrupts();
void disable_interrupts();
PhysicalAddress descriptor_area() const { return to_physical(m_descriptors.ptr()); }
PhysicalAddress driver_area() const { return to_physical(m_driver.ptr()); }
PhysicalAddress device_area() const { return to_physical(m_device.ptr()); }
bool new_data_available() const;
bool has_free_slots() const;
Optional<u16> take_free_slot();
VirtIOQueueChain pop_used_buffer_chain(size_t& used);
void discard_used_buffers();
SpinLock<u8>& lock() { return m_lock; }
bool should_notify() const;
private:
void reclaim_buffer_chain(u16 chain_start_index, u16 chain_end_index, size_t length_of_chain);
PhysicalAddress to_physical(const void* ptr) const
{
auto offset = FlatPtr(ptr) - m_queue_region->vaddr().get();
return m_queue_region->physical_page(0)->paddr().offset(offset);
}
struct [[gnu::packed]] VirtIOQueueDescriptor {
u64 address;
u32 length;
u16 flags;
u16 next;
};
struct [[gnu::packed]] VirtIOQueueDriver {
u16 flags;
u16 index;
u16 rings[];
};
struct [[gnu::packed]] VirtIOQueueDeviceItem {
u32 index;
u32 length;
};
struct [[gnu::packed]] VirtIOQueueDevice {
u16 flags;
u16 index;
VirtIOQueueDeviceItem rings[];
};
const u16 m_queue_size;
const u16 m_notify_offset;
u16 m_free_buffers;
u16 m_free_head { 0 };
u16 m_used_tail { 0 };
u16 m_driver_index_shadow { 0 };
OwnPtr<VirtIOQueueDescriptor> m_descriptors { nullptr };
OwnPtr<VirtIOQueueDriver> m_driver { nullptr };
OwnPtr<VirtIOQueueDevice> m_device { nullptr };
OwnPtr<Memory::Region> m_queue_region;
SpinLock<u8> m_lock;
friend class VirtIOQueueChain;
};
class VirtIOQueueChain {
public:
VirtIOQueueChain(VirtIOQueue& queue)
: m_queue(queue)
{
}
VirtIOQueueChain(VirtIOQueue& queue, u16 start_index, u16 end_index, size_t chain_length)
: m_queue(queue)
, m_start_of_chain_index(start_index)
, m_end_of_chain_index(end_index)
, m_chain_length(chain_length)
{
}
VirtIOQueueChain(VirtIOQueueChain&& other)
: m_queue(other.m_queue)
, m_start_of_chain_index(other.m_start_of_chain_index)
, m_end_of_chain_index(other.m_end_of_chain_index)
, m_chain_length(other.m_chain_length)
, m_chain_has_writable_pages(other.m_chain_has_writable_pages)
{
other.m_start_of_chain_index = {};
other.m_end_of_chain_index = {};
other.m_chain_length = 0;
other.m_chain_has_writable_pages = false;
}
VirtIOQueueChain& operator=(VirtIOQueueChain&& other)
{
VERIFY(&m_queue == &other.m_queue);
ensure_chain_is_empty();
m_start_of_chain_index = other.m_start_of_chain_index;
m_end_of_chain_index = other.m_end_of_chain_index;
m_chain_length = other.m_chain_length;
m_chain_has_writable_pages = other.m_chain_has_writable_pages;
other.m_start_of_chain_index = {};
other.m_end_of_chain_index = {};
other.m_chain_length = 0;
other.m_chain_has_writable_pages = false;
return *this;
}
~VirtIOQueueChain()
{
ensure_chain_is_empty();
}
[[nodiscard]] VirtIOQueue& queue() const { return m_queue; }
[[nodiscard]] bool is_empty() const { return m_chain_length == 0; }
[[nodiscard]] size_t length() const { return m_chain_length; }
bool add_buffer_to_chain(PhysicalAddress buffer_start, size_t buffer_length, BufferType buffer_type);
void submit_to_queue();
void release_buffer_slots_to_queue();
void for_each(Function<void(PhysicalAddress, size_t)> callback)
{
VERIFY(m_queue.lock().is_locked());
if (!m_start_of_chain_index.has_value())
return;
auto index = m_start_of_chain_index.value();
for (size_t i = 0; i < m_chain_length; ++i) {
auto addr = m_queue.m_descriptors[index].address;
auto length = m_queue.m_descriptors[index].length;
callback(PhysicalAddress(addr), length);
index = m_queue.m_descriptors[index].next;
}
}
private:
void ensure_chain_is_empty() const
{
VERIFY(!m_start_of_chain_index.has_value());
VERIFY(!m_end_of_chain_index.has_value());
VERIFY(m_chain_length == 0);
}
VirtIOQueue& m_queue;
Optional<u16> m_start_of_chain_index {};
Optional<u16> m_end_of_chain_index {};
size_t m_chain_length {};
bool m_chain_has_writable_pages { false };
};
}

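Putting this header together, the usual life cycle of a chain has a submit side and a completion side. A sketch from inside a VirtIODevice subclass (get_queue() and supply_chain_and_notify() are protected members of the base class; paddr/len describe a DMA-able buffer and are placeholders):

void submit_buffer(u16 queue_index, PhysicalAddress paddr, size_t len)
{
    auto& queue = get_queue(queue_index);
    ScopedSpinLock lock(queue.lock());
    VirtIOQueueChain chain(queue);
    if (!chain.add_buffer_to_chain(paddr, len, BufferType::DeviceReadable))
        return; // no free descriptor slots right now
    supply_chain_and_notify(queue_index, chain); // submits, then notifies unless the device opted out
}

virtual void handle_queue_update(u16 queue_index) override
{
    auto& queue = get_queue(queue_index);
    ScopedSpinLock lock(queue.lock());
    size_t used;
    auto chain = queue.pop_used_buffer_chain(used);
    while (!chain.is_empty()) {
        chain.for_each([](PhysicalAddress, size_t) { /* consume the completed buffer */ });
        chain.release_buffer_slots_to_queue(); // return the descriptors to the free list
        chain = queue.pop_used_buffer_chain(used);
    }
}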
Kernel/Bus/VirtIO/VirtIORNG.cpp Normal file
@@ -0,0 +1,73 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/VirtIORNG.h>
#include <Kernel/Sections.h>
namespace Kernel {
UNMAP_AFTER_INIT VirtIORNG::VirtIORNG(PCI::Address address)
: CharacterDevice(10, 183)
, VirtIODevice(address, "VirtIORNG")
{
bool success = negotiate_features([&](auto) {
return 0;
});
if (success) {
success = setup_queues(1);
}
if (success) {
finish_init();
m_entropy_buffer = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIORNG", Memory::Region::Access::ReadWrite);
if (m_entropy_buffer) {
memset(m_entropy_buffer->vaddr().as_ptr(), 0, m_entropy_buffer->size());
request_entropy_from_host();
}
}
}
VirtIORNG::~VirtIORNG()
{
}
bool VirtIORNG::handle_device_config_change()
{
VERIFY_NOT_REACHED(); // Device has no config
}
void VirtIORNG::handle_queue_update(u16 queue_index)
{
VERIFY(queue_index == REQUESTQ);
size_t available_entropy = 0, used;
auto& queue = get_queue(REQUESTQ);
{
ScopedSpinLock lock(queue.lock());
auto chain = queue.pop_used_buffer_chain(used);
if (chain.is_empty())
return;
VERIFY(chain.length() == 1);
chain.for_each([&available_entropy](PhysicalAddress, size_t length) {
available_entropy = length;
});
chain.release_buffer_slots_to_queue();
}
dbgln_if(VIRTIO_DEBUG, "VirtIORNG: received {} bytes of entropy!", available_entropy);
for (auto i = 0u; i < available_entropy; i++) {
m_entropy_source.add_random_event(m_entropy_buffer->vaddr().as_ptr()[i]);
}
// TODO: When should we get some more entropy?
}
void VirtIORNG::request_entropy_from_host()
{
auto& queue = get_queue(REQUESTQ);
ScopedSpinLock lock(queue.lock());
VirtIOQueueChain chain(queue);
chain.add_buffer_to_chain(m_entropy_buffer->physical_page(0)->paddr(), PAGE_SIZE, BufferType::DeviceWritable);
supply_chain_and_notify(REQUESTQ, chain);
}
}

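One possible answer to the TODO above, left as a sketch rather than part of the commit: re-arm the request as soon as a batch has been mixed into the pool, using only what this file already defines.

void VirtIORNG::handle_queue_update(u16 queue_index)
{
    // ... existing body: pop the used chain, add_random_event() per byte ...
    // Then immediately ask the host for the next PAGE_SIZE of entropy.
    // (A timer or a low-watermark check on the entropy pool would be the
    // throttled variant of this.)
    request_entropy_from_host();
}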
Kernel/Bus/VirtIO/VirtIORNG.h Normal file
@@ -0,0 +1,43 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/VirtIO/VirtIO.h>
#include <Kernel/Devices/CharacterDevice.h>
#include <Kernel/Random.h>
namespace Kernel {
#define REQUESTQ 0
class VirtIORNG final : public CharacterDevice
, public VirtIODevice {
public:
virtual StringView purpose() const override { return class_name(); }
virtual StringView class_name() const override { return m_class_name; }
virtual bool can_read(const FileDescription&, size_t) const override { return false; }
virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return 0; }
virtual bool can_write(const FileDescription&, size_t) const override { return false; }
virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override { return 0; }
virtual mode_t required_mode() const override { return 0666; }
virtual String device_name() const override { return "hwrng"; }
VirtIORNG(PCI::Address);
virtual ~VirtIORNG() override;
private:
virtual bool handle_device_config_change() override;
virtual void handle_queue_update(u16 queue_index) override;
void request_entropy_from_host();
OwnPtr<Memory::Region> m_entropy_buffer;
EntropySource m_entropy_source;
};
}