
Kernel: Modernize use of pointers in VirtIO

Raw pointers were mostly replaced with smart pointers and references
where appropriate based on kling and smartcomputer7's suggestions :)

Co-authored-by: Sahan <sahan.h.fernando@gmail.com>
Idan Horowitz 2021-04-15 19:12:06 +10:00 committed by Andreas Kling
parent ea4c9efbb9
commit 4a467c553a
6 changed files with 146 additions and 156 deletions
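
The pattern repeated across all six files below: owning containers move from raw pointers and plain value Vectors to AK smart pointers (OwnPtr, NonnullOwnPtrVector), and helpers that can assume a valid object take references instead of nullable pointers, with callers dereferencing a cached pointer only after checking it. A rough before/after sketch, distilled from the hunks below rather than copied from any single one of them:

    // Before: value storage plus raw Configuration* passed around; every helper
    // has to trust that the pointer it was handed is non-null.
    Vector<Configuration> m_configs;
    const Configuration* m_common_cfg { nullptr };
    u8 config_read8(const Configuration* config, u32 offset);

    // After: the vector owns heap-allocated entries, helpers take references,
    // and cached pointers are dereferenced only on paths that checked them.
    NonnullOwnPtrVector<Configuration> m_configs;
    const Configuration* m_common_cfg { nullptr }; // Cached due to high usage
    u8 config_read8(const Configuration& config, u32 offset);

    // Typical call site after the change:
    if (!m_common_cfg)
        return in<u8>(REG_DEVICE_STATUS);
    return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);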


@@ -44,9 +44,9 @@ void VirtIO::detect()
     });
 }
 
-VirtIODevice::VirtIODevice(PCI::Address address, const char* class_name)
+VirtIODevice::VirtIODevice(PCI::Address address, String class_name)
     : PCI::Device(address, PCI::get_interrupt_line(address))
-    , m_class_name(class_name)
+    , m_class_name(move(class_name))
     , m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1))
 {
     dbgln("{}: Found @ {}", m_class_name, pci_address());
@@ -60,45 +60,40 @@ VirtIODevice::VirtIODevice(PCI::Address address, const char* class_name)
     for (auto& capability : capabilities) {
         if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
            // We have a virtio_pci_cap
-            Configuration cfg = {};
-            cfg.cfg_type = capability.read8(0x3);
-            switch (cfg.cfg_type) {
-            case VIRTIO_PCI_CAP_COMMON_CFG:
-            case VIRTIO_PCI_CAP_NOTIFY_CFG:
-            case VIRTIO_PCI_CAP_ISR_CFG:
-            case VIRTIO_PCI_CAP_DEVICE_CFG:
-            case VIRTIO_PCI_CAP_PCI_CFG: {
+            auto cfg = make<Configuration>();
+            auto raw_config_type = capability.read8(0x3);
+            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
+                dbgln("{}: Unknown capability configuration type: {}", m_class_name, raw_config_type);
+                return;
+            }
+            cfg->cfg_type = static_cast<ConfigurationType>(raw_config_type);
             auto cap_length = capability.read8(0x2);
             if (cap_length < 0x10) {
                 dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
                 break;
             }
-            cfg.bar = capability.read8(0x4);
-            if (cfg.bar > 0x5) {
-                dbgln("{}: Unexpected capability bar value: {}", m_class_name, cfg.bar);
+            cfg->bar = capability.read8(0x4);
+            if (cfg->bar > 0x5) {
+                dbgln("{}: Unexpected capability bar value: {}", m_class_name, cfg->bar);
                 break;
             }
-            cfg.offset = capability.read32(0x8);
-            cfg.length = capability.read32(0xc);
-            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, cfg.cfg_type, cfg.bar, cfg.offset, cfg.length);
-            m_configs.append(cfg);
-            if (cfg.cfg_type == VIRTIO_PCI_CAP_COMMON_CFG)
+            cfg->offset = capability.read32(0x8);
+            cfg->length = capability.read32(0xc);
+            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, (u32)cfg->cfg_type, cfg->bar, cfg->offset, cfg->length);
+            if (cfg->cfg_type == ConfigurationType::Common)
                 m_use_mmio = true;
-            else if (cfg.cfg_type == VIRTIO_PCI_CAP_NOTIFY_CFG)
+            else if (cfg->cfg_type == ConfigurationType::Notify)
                 m_notify_multiplier = capability.read32(0x10);
-                break;
-            }
-            default:
-                dbgln("{}: Unknown capability configuration type: {}", m_class_name, cfg.cfg_type);
-                break;
-            }
+            m_configs.append(move(cfg));
         }
     }
-    m_common_cfg = get_config(VIRTIO_PCI_CAP_COMMON_CFG, 0);
-    m_notify_cfg = get_config(VIRTIO_PCI_CAP_NOTIFY_CFG, 0);
-    m_isr_cfg = get_config(VIRTIO_PCI_CAP_ISR_CFG, 0);
+    if (m_use_mmio) {
+        m_common_cfg = get_config(ConfigurationType::Common, 0);
+        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
+        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
+    }
     set_status_bit(DEVICE_STATUS_DRIVER);
 }
@@ -123,80 +118,80 @@ auto VirtIODevice::mapping_for_bar(u8 bar) -> MappedMMIO&
 void VirtIODevice::notify_queue(u16 queue_index)
 {
     dbgln("VirtIODevice: notifying about queue change at idx: {}", queue_index);
-    if (!m_use_mmio)
+    if (!m_notify_cfg)
         out<u16>(REG_QUEUE_NOTIFY, queue_index);
     else
-        config_write16(m_notify_cfg, get_queue(queue_index)->notify_offset() * m_notify_multiplier, queue_index);
+        config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
 }
 
-u8 VirtIODevice::config_read8(const Configuration* config, u32 offset)
+u8 VirtIODevice::config_read8(const Configuration& config, u32 offset)
 {
-    return mapping_for_bar(config->bar).read<u8>(config->offset + offset);
+    return mapping_for_bar(config.bar).read<u8>(config.offset + offset);
 }
 
-u16 VirtIODevice::config_read16(const Configuration* config, u32 offset)
+u16 VirtIODevice::config_read16(const Configuration& config, u32 offset)
 {
-    return mapping_for_bar(config->bar).read<u16>(config->offset + offset);
+    return mapping_for_bar(config.bar).read<u16>(config.offset + offset);
 }
 
-u32 VirtIODevice::config_read32(const Configuration* config, u32 offset)
+u32 VirtIODevice::config_read32(const Configuration& config, u32 offset)
 {
-    return mapping_for_bar(config->bar).read<u32>(config->offset + offset);
+    return mapping_for_bar(config.bar).read<u32>(config.offset + offset);
 }
 
-void VirtIODevice::config_write8(const Configuration* config, u32 offset, u8 value)
+void VirtIODevice::config_write8(const Configuration& config, u32 offset, u8 value)
 {
-    mapping_for_bar(config->bar).write(config->offset + offset, value);
+    mapping_for_bar(config.bar).write(config.offset + offset, value);
 }
 
-void VirtIODevice::config_write16(const Configuration* config, u32 offset, u16 value)
+void VirtIODevice::config_write16(const Configuration& config, u32 offset, u16 value)
 {
-    mapping_for_bar(config->bar).write(config->offset + offset, value);
+    mapping_for_bar(config.bar).write(config.offset + offset, value);
 }
 
-void VirtIODevice::config_write32(const Configuration* config, u32 offset, u32 value)
+void VirtIODevice::config_write32(const Configuration& config, u32 offset, u32 value)
 {
-    mapping_for_bar(config->bar).write(config->offset + offset, value);
+    mapping_for_bar(config.bar).write(config.offset + offset, value);
 }
 
-void VirtIODevice::config_write64(const Configuration* config, u32 offset, u64 value)
+void VirtIODevice::config_write64(const Configuration& config, u32 offset, u64 value)
 {
-    mapping_for_bar(config->bar).write(config->offset + offset, value);
+    mapping_for_bar(config.bar).write(config.offset + offset, value);
 }
 
 u8 VirtIODevice::read_status_bits()
 {
-    if (!m_use_mmio)
+    if (!m_common_cfg)
         return in<u8>(REG_DEVICE_STATUS);
-    return config_read8(m_common_cfg, COMMON_CFG_DEVICE_STATUS);
+    return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
 }
 
 void VirtIODevice::clear_status_bit(u8 status_bit)
 {
     m_status &= status_bit;
-    if (!m_use_mmio)
+    if (!m_common_cfg)
         out<u8>(REG_DEVICE_STATUS, m_status);
     else
-        config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
+        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
 }
 
 void VirtIODevice::set_status_bit(u8 status_bit)
 {
     m_status |= status_bit;
-    if (!m_use_mmio)
+    if (!m_common_cfg)
         out<u8>(REG_DEVICE_STATUS, m_status);
     else
-        config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
+        config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
 }
 
 u64 VirtIODevice::get_device_features()
 {
-    if (!m_use_mmio)
+    if (!m_common_cfg)
         return in<u32>(REG_DEVICE_FEATURES);
-    config_write32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
-    auto lower_bits = config_read32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
-    config_write32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
-    u64 upper_bits = (u64)config_read32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
+    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
+    auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
+    config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
+    u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
     return upper_bits | lower_bits;
 }
@@ -208,24 +203,24 @@ bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_features)
     if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
         accepted_features |= VIRTIO_F_VERSION_1;
     } else {
-        dbgln("{}: legacy device detected", m_class_name);
+        dbgln_if(VIRTIO_DEBUG, "{}: legacy device detected", m_class_name);
     }
 
     if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
-        dbgln("{}: packed queues not yet supported", m_class_name);
+        dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", m_class_name);
         accepted_features &= ~(VIRTIO_F_RING_PACKED);
     }
 
-    dbgln("VirtIOConsole: Device features: {}", device_features);
-    dbgln("VirtIOConsole: Accepted features: {}", accepted_features);
+    dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", m_class_name, device_features);
+    dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", m_class_name, accepted_features);
 
-    if (!m_use_mmio) {
+    if (!m_common_cfg) {
         out<u32>(REG_GUEST_FEATURES, accepted_features);
     } else {
-        config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
-        config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
-        config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
-        config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
+        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
+        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
+        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
+        config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
     }
     set_status_bit(DEVICE_STATUS_FEATURES_OK);
     m_status = read_status_bits();
@@ -240,58 +235,44 @@ bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_features)
     return true;
 }
 
-auto VirtIODevice::get_common_config(u32 index) const -> const Configuration*
-{
-    if (index == 0)
-        return m_common_cfg;
-    return get_config(VIRTIO_PCI_CAP_COMMON_CFG, index);
-}
-
-auto VirtIODevice::get_device_config(u32 index) const -> const Configuration*
-{
-    return get_config(VIRTIO_PCI_CAP_DEVICE_CFG, index);
-}
-
 void VirtIODevice::reset_device()
 {
     dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
-    if (!m_use_mmio) {
+    if (!m_common_cfg) {
         clear_status_bit(0);
         while (read_status_bits() != 0) {
             // TODO: delay a bit?
         }
         return;
-    } else if (m_common_cfg) {
-        config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
-        while (config_read8(m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
-            // TODO: delay a bit?
-        }
-        return;
     }
-    dbgln_if(VIRTIO_DEBUG, "{}: No handle to device, cant reset", m_class_name);
+    config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
+    while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
+        // TODO: delay a bit?
+    }
+    return;
 }
 
 bool VirtIODevice::setup_queue(u16 queue_index)
 {
-    if (!m_use_mmio || !m_common_cfg)
+    if (!m_common_cfg)
         return false;
-    config_write16(m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
-    u16 queue_size = config_read16(m_common_cfg, COMMON_CFG_QUEUE_SIZE);
+    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
+    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
     if (queue_size == 0) {
         dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
         return true;
     }
-    u16 queue_notify_offset = config_read16(m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
+    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
     auto queue = make<VirtIOQueue>(queue_size, queue_notify_offset);
     if (queue->is_null())
         return false;
-    config_write64(m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
-    config_write64(m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
-    config_write64(m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
+    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
+    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
+    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
     dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] size: {}", m_class_name, queue_index, queue_size);
@@ -307,7 +288,7 @@ void VirtIODevice::set_requested_queue_count(u16 count)
 bool VirtIODevice::setup_queues()
 {
     if (m_common_cfg) {
-        auto maximum_queue_count = config_read16(m_common_cfg, COMMON_CFG_NUM_QUEUES);
+        auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
         if (m_queue_count == 0) {
             m_queue_count = maximum_queue_count;
         } else if (m_queue_count > maximum_queue_count) {
@@ -340,23 +321,27 @@ bool VirtIODevice::finish_init()
 void VirtIODevice::supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType buffer_type)
 {
     VERIFY(queue_index < m_queue_count);
-    if (get_queue(queue_index)->supply_buffer(buffer, len, buffer_type))
+    if (get_queue(queue_index).supply_buffer(buffer, len, buffer_type))
         notify_queue(queue_index);
 }
 
 u8 VirtIODevice::isr_status()
 {
-    if (!m_use_mmio)
+    if (!m_isr_cfg)
         return in<u8>(REG_ISR_STATUS);
-    return config_read8(m_isr_cfg, 0);
+    return config_read8(*m_isr_cfg, 0);
 }
 
 void VirtIODevice::handle_irq(const RegisterState&)
 {
     u8 isr_type = isr_status();
     dbgln_if(VIRTIO_DEBUG, "VirtIODevice: Handling interrupt with status: {}", isr_type);
-    if (isr_type & DEVICE_CONFIG_INTERRUPT)
-        handle_device_config_change();
+    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
+        if (!handle_device_config_change()) {
+            set_status_bit(DEVICE_STATUS_FAILED);
+            dbgln("{}: Failed to handle device config change!", m_class_name);
+        }
+    }
     if (isr_type & QUEUE_INTERRUPT) {
         for (auto& queue : m_queues) {
             if (queue.handle_interrupt())


@@ -84,6 +84,21 @@ namespace Kernel {
 #define QUEUE_INTERRUPT 0x1
 #define DEVICE_CONFIG_INTERRUPT 0x2
 
+enum class ConfigurationType : u8 {
+    Common = 1,
+    Notify = 2,
+    ISR = 3,
+    Device = 4,
+    PCI = 5
+};
+
+struct Configuration {
+    ConfigurationType cfg_type;
+    u8 bar;
+    u32 offset;
+    u32 length;
+};
+
 class VirtIO {
 public:
     static void detect();
@@ -91,11 +106,11 @@ public:
 class VirtIODevice : public PCI::Device {
 public:
-    VirtIODevice(PCI::Address, const char*);
+    VirtIODevice(PCI::Address, String);
     virtual ~VirtIODevice() override;
 
 protected:
-    const char* const m_class_name;
+    const String m_class_name;
 
     struct MappedMMIO {
         OwnPtr<Region> base;
@@ -122,16 +137,9 @@ protected:
         }
     };
 
-    struct Configuration {
-        u8 cfg_type;
-        u8 bar;
-        u32 offset;
-        u32 length;
-    };
-
-    const Configuration* get_config(u8 cfg_type, u32 index = 0) const
+    const Configuration* get_config(ConfigurationType cfg_type, u32 index = 0) const
     {
-        for (const auto& cfg : m_configs) {
+        for (auto& cfg : m_configs) {
             if (cfg.cfg_type != cfg_type)
                 continue;
             if (index > 0) {
@@ -142,8 +150,6 @@ protected:
         }
         return nullptr;
     }
-    const Configuration* get_common_config(u32 index = 0) const;
-    const Configuration* get_device_config(u32 index = 0) const;
 
     template<typename F>
     void read_config_atomic(F f)
@@ -151,22 +157,22 @@ protected:
         if (m_common_cfg) {
             u8 generation_before, generation_after;
             do {
-                generation_before = config_read8(m_common_cfg, 0x15);
+                generation_before = config_read8(*m_common_cfg, 0x15);
                 f();
-                generation_after = config_read8(m_common_cfg, 0x15);
+                generation_after = config_read8(*m_common_cfg, 0x15);
             } while (generation_before != generation_after);
         } else {
             f();
         }
     }
 
-    u8 config_read8(const Configuration*, u32);
-    u16 config_read16(const Configuration*, u32);
-    u32 config_read32(const Configuration*, u32);
-    void config_write8(const Configuration*, u32, u8);
-    void config_write16(const Configuration*, u32, u16);
-    void config_write32(const Configuration*, u32, u32);
-    void config_write64(const Configuration*, u32, u64);
+    u8 config_read8(const Configuration&, u32);
+    u16 config_read16(const Configuration&, u32);
+    u32 config_read32(const Configuration&, u32);
+    void config_write8(const Configuration&, u32, u8);
+    void config_write16(const Configuration&, u32, u16);
+    void config_write32(const Configuration&, u32, u32);
+    void config_write64(const Configuration&, u32, u64);
 
     auto mapping_for_bar(u8) -> MappedMMIO&;
@@ -176,9 +182,10 @@ protected:
     u64 get_device_features();
     bool finish_init();
 
-    VirtIOQueue* get_queue(u16 queue_index)
+    VirtIOQueue& get_queue(u16 queue_index)
     {
-        return &m_queues[queue_index];
+        VERIFY(queue_index < m_queue_count);
+        return m_queues[queue_index];
     }
 
     void set_requested_queue_count(u16);
@@ -205,7 +212,7 @@ protected:
     void supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType);
 
     virtual void handle_irq(const RegisterState&) override;
-    virtual void handle_device_config_change() = 0;
+    virtual bool handle_device_config_change() = 0;
 
 private:
     template<typename T>
@@ -231,7 +238,7 @@ private:
     u8 isr_status();
 
     NonnullOwnPtrVector<VirtIOQueue> m_queues;
-    Vector<Configuration> m_configs;
+    NonnullOwnPtrVector<Configuration> m_configs;
     const Configuration* m_common_cfg { nullptr }; // Cached due to high usage
     const Configuration* m_notify_cfg { nullptr }; // Cached due to high usage
     const Configuration* m_isr_cfg { nullptr }; // Cached due to high usage


@@ -32,7 +32,7 @@ VirtIOConsole::VirtIOConsole(PCI::Address address)
     : CharacterDevice(229, 0)
     , VirtIODevice(address, "VirtIOConsole")
 {
-    if (auto* cfg = get_device_config()) {
+    if (auto cfg = get_config(ConfigurationType::Device)) {
         bool success = negotiate_features([&](u64 supported_features) {
             u64 negotiated = 0;
             if (is_feature_set(supported_features, VIRTIO_CONSOLE_F_SIZE))
@@ -46,11 +46,11 @@ VirtIOConsole::VirtIOConsole(PCI::Address address)
             u16 cols = 0, rows = 0;
             read_config_atomic([&]() {
                 if (is_feature_accepted(VIRTIO_CONSOLE_F_SIZE)) {
-                    cols = config_read16(cfg, 0x0);
-                    rows = config_read16(cfg, 0x2);
+                    cols = config_read16(*cfg, 0x0);
+                    rows = config_read16(*cfg, 0x2);
                 }
                 if (is_feature_accepted(VIRTIO_CONSOLE_F_MULTIPORT)) {
-                    max_nr_ports = config_read32(cfg, 0x4);
+                    max_nr_ports = config_read32(*cfg, 0x4);
                 }
             });
             dbgln("VirtIOConsole: cols: {}, rows: {}, max nr ports {}", cols, rows, max_nr_ports);
@@ -58,12 +58,10 @@ VirtIOConsole::VirtIOConsole(PCI::Address address)
         success = finish_init();
     }
     if (success) {
-        m_receive_queue = get_queue(RECEIVEQ);
-        m_receive_queue->on_data_available = [&]() {
+        get_queue(RECEIVEQ).on_data_available = [&]() {
             dbgln("VirtIOConsole: receive_queue on_data_available");
         };
-        m_send_queue = get_queue(TRANSMITQ);
-        m_send_queue->on_data_available = [&]() {
+        get_queue(TRANSMITQ).on_data_available = [&]() {
             dbgln("VirtIOConsole: send_queue on_data_available");
         };
         dbgln("TODO: Populate receive queue with a receive buffer");
@@ -75,9 +73,10 @@ VirtIOConsole::~VirtIOConsole()
 {
 }
 
-void VirtIOConsole::handle_device_config_change()
+bool VirtIOConsole::handle_device_config_change()
 {
     dbgln("VirtIOConsole: Handle device config change");
+    return true;
 }
 
 bool VirtIOConsole::can_read(const FileDescription&, size_t) const


@@ -47,7 +47,7 @@ public:
     virtual ~VirtIOConsole() override;
 
 private:
-    virtual const char* class_name() const override { return m_class_name; }
+    virtual const char* class_name() const override { return m_class_name.characters(); }
 
     virtual bool can_read(const FileDescription&, size_t) const override;
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override;
@@ -56,7 +56,7 @@ private:
     virtual mode_t required_mode() const override { return 0666; }
 
-    virtual void handle_device_config_change() override;
+    virtual bool handle_device_config_change() override;
     virtual String device_name() const override { return String::formatted("hvc{}", minor()); }
 
     VirtIOQueue* m_receive_queue { nullptr };


@@ -86,8 +86,7 @@ bool VirtIOQueue::supply_buffer(const u8* buffer, u32 len, BufferType buffer_type)
     full_memory_barrier();
 
     auto device_flags = m_device->flags;
-    dbgln("VirtIODevice: supplied buffer... should notify: {}", device_flags);
-    return device_flags & 1;
+    return !(device_flags & 1); // if bit 1 is enabled the device disabled interrupts
 }
 
 bool VirtIOQueue::new_data_available() const
 {


@@ -47,9 +47,9 @@ public:
     void enable_interrupts();
     void disable_interrupts();
 
-    PhysicalAddress descriptor_area() const { return to_physical(m_descriptors); }
-    PhysicalAddress driver_area() const { return to_physical(m_driver); }
-    PhysicalAddress device_area() const { return to_physical(m_device); }
+    PhysicalAddress descriptor_area() const { return to_physical(m_descriptors.ptr()); }
+    PhysicalAddress driver_area() const { return to_physical(m_driver.ptr()); }
+    PhysicalAddress device_area() const { return to_physical(m_device.ptr()); }
 
     bool supply_buffer(const u8* buffer, u32 len, BufferType);
     bool new_data_available() const;
@@ -58,30 +58,30 @@ public:
     Function<void()> on_data_available;
 
 private:
-    PhysicalAddress to_physical(void* ptr) const
+    PhysicalAddress to_physical(const void* ptr) const
     {
         auto offset = FlatPtr(ptr) - m_region->vaddr().get();
         return m_region->physical_page(0)->paddr().offset(offset);
     }
 
-    struct VirtIOQueueDescriptor {
+    struct [[gnu::packed]] VirtIOQueueDescriptor {
         u64 address;
         u32 length;
         u16 flags;
         u16 next;
     };
 
-    struct VirtIOQueueDriver {
+    struct [[gnu::packed]] VirtIOQueueDriver {
         u16 flags;
         u16 index;
         u16 rings[];
     };
 
-    struct VirtIOQueueDeviceItem {
+    struct [[gnu::packed]] VirtIOQueueDeviceItem {
         u32 index;
         u32 length;
     };
 
-    struct VirtIOQueueDevice {
+    struct [[gnu::packed]] VirtIOQueueDevice {
         u16 flags;
         u16 index;
         VirtIOQueueDeviceItem rings[];
@@ -93,9 +93,9 @@ private:
     u16 m_free_head { 0 };
     u16 m_used_tail { 0 };
 
-    VirtIOQueueDescriptor* m_descriptors { nullptr };
-    VirtIOQueueDriver* m_driver { nullptr };
-    VirtIOQueueDevice* m_device { nullptr };
+    OwnPtr<VirtIOQueueDescriptor> m_descriptors { nullptr };
+    OwnPtr<VirtIOQueueDriver> m_driver { nullptr };
+    OwnPtr<VirtIOQueueDevice> m_device { nullptr };
     OwnPtr<Region> m_region;
     SpinLock<u8> m_lock;
 };