1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 03:58:12 +00:00

Kernel/Graphics: Apply DisplayConnector design on the VirtIO driver

This commit is contained in:
Liav A 2022-04-30 13:56:29 +03:00 committed by Andreas Kling
parent 4ff6150f1b
commit c27c414ed1
13 changed files with 729 additions and 886 deletions

View file

@ -89,7 +89,7 @@ set(KERNEL_SOURCES
Graphics/Intel/NativeGraphicsAdapter.cpp
Graphics/VGA/ISAAdapter.cpp
Graphics/VGA/PCIAdapter.cpp
Graphics/VirtIOGPU/FramebufferDevice.cpp
Graphics/VirtIOGPU/DisplayConnector.cpp
Graphics/VirtIOGPU/Console.cpp
Graphics/VirtIOGPU/GPU3DDevice.cpp
Graphics/VirtIOGPU/GraphicsAdapter.cpp

View file

@ -306,8 +306,9 @@ ErrorOr<void> DisplayConnector::ioctl(OpenFileDescription&, unsigned request, Us
TRY(copy_from_user(&user_dirty_rect, &flush_rects.rects[i]));
{
SpinlockLocker control_locker(m_control_lock);
if (console_mode())
if (console_mode()) {
return {};
}
TRY(flush_rectangle(flush_rects.buffer_index, user_dirty_rect));
}
}
@ -319,10 +320,13 @@ ErrorOr<void> DisplayConnector::ioctl(OpenFileDescription&, unsigned request, Us
// WindowServer is not ready yet to handle errors such as EBUSY currently.
MutexLocker locker(m_flushing_lock);
SpinlockLocker control_locker(m_control_lock);
if (console_mode())
if (console_mode()) {
return {};
}
if (!flush_support())
return Error::from_errno(ENOTSUP);
TRY(flush_first_surface());
return {};
}

View file

@ -198,7 +198,7 @@ UNMAP_AFTER_INIT bool GraphicsManagement::determine_and_initialize_graphics_devi
break;
case PCI::VendorID::VirtIO:
dmesgln("Graphics: Using VirtIO console");
adapter = Graphics::VirtIOGPU::GraphicsAdapter::initialize(device_identifier);
adapter = VirtIOGraphicsAdapter::initialize(device_identifier);
break;
default:
if (!is_vga_compatible_pci_device(device_identifier))

View file

@ -5,7 +5,6 @@
*/
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/WorkQueue.h>
namespace Kernel::Graphics::VirtIOGPU {
@ -30,22 +29,22 @@ void DirtyRect::union_rect(size_t x, size_t y, size_t width, size_t height)
}
}
NonnullRefPtr<Console> Console::initialize(RefPtr<FramebufferDevice> const& framebuffer_device)
NonnullRefPtr<Console> Console::initialize(VirtIODisplayConnector& parent_display_connector)
{
return adopt_ref(*new Console(framebuffer_device));
auto current_resolution = parent_display_connector.current_mode_setting();
return adopt_ref(*new Console(parent_display_connector, current_resolution));
}
Console::Console(RefPtr<FramebufferDevice> const& framebuffer_device)
: GenericFramebufferConsole(framebuffer_device->width(), framebuffer_device->height(), framebuffer_device->pitch())
, m_framebuffer_device(framebuffer_device)
Console::Console(VirtIODisplayConnector const& parent_display_connector, DisplayConnector::ModeSetting current_resolution)
: GenericFramebufferConsole(current_resolution.horizontal_active, current_resolution.vertical_active, current_resolution.horizontal_stride)
, m_parent_display_connector(parent_display_connector)
{
enqueue_refresh_timer();
}
void Console::set_resolution(size_t width, size_t height, size_t pitch)
void Console::set_resolution(size_t, size_t, size_t)
{
auto did_set_resolution = m_framebuffer_device->set_head_resolution(0, width, height, pitch);
VERIFY(!did_set_resolution.is_error());
// FIXME: Update some values here?
}
void Console::flush(size_t x, size_t y, size_t width, size_t height)
@ -58,16 +57,12 @@ void Console::enqueue_refresh_timer()
NonnullRefPtr<Timer> refresh_timer = adopt_ref(*new Timer());
refresh_timer->setup(CLOCK_MONOTONIC, refresh_interval, [this]() {
auto rect = m_dirty_rect;
if (rect.is_dirty()) {
Protocol::Rect dirty_rect {
.x = (u32)rect.x(),
.y = (u32)rect.y(),
.width = (u32)rect.width(),
.height = (u32)rect.height(),
};
// FIXME: Do something sanely here if we can't allocate a work queue?
MUST(g_io_work->try_queue([this, dirty_rect]() {
m_framebuffer_device->flush_dirty_window(dirty_rect, m_framebuffer_device->current_buffer());
if (m_enabled.load() && rect.is_dirty()) {
MUST(g_io_work->try_queue([this]() {
{
MutexLocker locker(m_parent_display_connector->m_flushing_lock);
MUST(m_parent_display_connector->flush_first_surface());
}
m_dirty_rect.clear();
}));
}
@ -78,16 +73,17 @@ void Console::enqueue_refresh_timer()
void Console::enable()
{
auto current_resolution = m_parent_display_connector->current_mode_setting();
GenericFramebufferConsole::enable();
m_width = m_framebuffer_device->width();
m_height = m_framebuffer_device->height();
m_pitch = m_framebuffer_device->pitch();
m_width = current_resolution.horizontal_active;
m_height = current_resolution.vertical_active;
m_pitch = current_resolution.horizontal_stride;
m_dirty_rect.union_rect(0, 0, m_width, m_height);
}
u8* Console::framebuffer_data()
{
return m_framebuffer_device->framebuffer_data();
return m_parent_display_connector->framebuffer_data();
}
}

View file

@ -7,7 +7,7 @@
#pragma once
#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/DisplayConnector.h>
#include <Kernel/TimerQueue.h>
namespace Kernel::Graphics::VirtIOGPU {
@ -32,7 +32,7 @@ private:
class Console final : public GenericFramebufferConsole {
public:
static NonnullRefPtr<Console> initialize(RefPtr<FramebufferDevice> const&);
static NonnullRefPtr<Console> initialize(VirtIODisplayConnector& parent_display_connector);
virtual void set_resolution(size_t width, size_t height, size_t pitch) override;
virtual void flush(size_t x, size_t y, size_t width, size_t height) override;
@ -42,8 +42,8 @@ private:
void enqueue_refresh_timer();
virtual u8* framebuffer_data() override;
Console(RefPtr<FramebufferDevice> const&);
RefPtr<FramebufferDevice> m_framebuffer_device;
Console(VirtIODisplayConnector const& parent_display_connector, DisplayConnector::ModeSetting current_resolution);
NonnullRefPtr<VirtIODisplayConnector> m_parent_display_connector;
DirtyRect m_dirty_rect;
};

View file

@ -0,0 +1,365 @@
/*
* Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/API/VirGL.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/DisplayConnector.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>
#include <Kernel/Graphics/VirtIOGPU/Protocol.h>
#include <Kernel/Random.h>
namespace Kernel {
// Factory: creates the connector device for one VirtIO GPU scanout and
// immediately attaches a kernel console to it. Failure here is treated as
// unrecoverable (MUST semantics) since this runs during graphics bring-up.
NonnullRefPtr<VirtIODisplayConnector> VirtIODisplayConnector::must_create(VirtIOGraphicsAdapter& graphics_adapter, Graphics::VirtIOGPU::ScanoutID scanout_id)
{
    auto device_or_error = DeviceManagement::try_create_device<VirtIODisplayConnector>(graphics_adapter, scanout_id);
    VERIFY(!device_or_error.is_error());
    auto connector = device_or_error.release_value();
    // Console creation is deferred out of the constructor because the Console
    // needs a reference to the fully-constructed connector.
    connector->initialize_console();
    return connector;
}
// Binds this connector to its owning adapter and the scanout it drives.
// No hardware interaction happens here; framebuffers are created later via
// set_mode_setting()/set_safe_mode_setting().
VirtIODisplayConnector::VirtIODisplayConnector(VirtIOGraphicsAdapter& graphics_adapter, Graphics::VirtIOGPU::ScanoutID scanout_id)
    : DisplayConnector()
    , m_graphics_adapter(graphics_adapter)
    , m_scanout_id(scanout_id)
{
}
// Creates the VirtIO GPU framebuffer console that renders on top of this
// connector. Called once from must_create(), after construction completes.
void VirtIODisplayConnector::initialize_console()
{
    m_console = Kernel::Graphics::VirtIOGPU::Console::initialize(*this);
}
// Badge-guarded hook for VirtIOGraphicsAdapter: applies the safe default mode
// once the adapter has finished initializing this connector. A failure to set
// the fallback mode is fatal (MUST).
void VirtIODisplayConnector::set_safe_mode_setting_after_initialization(Badge<VirtIOGraphicsAdapter>)
{
    MUST(set_safe_mode_setting());
}
// Applies a new display mode: validates the requested geometry against the
// VirtIO GPU limits, updates the cached display-info rect, rebuilds both
// framebuffers for the new size, and records the resulting ModeSetting.
// Returns ENOTSUP for resolutions beyond MAX_VIRTIOGPU_RESOLUTION_*.
ErrorOr<void> VirtIODisplayConnector::set_mode_setting(ModeSetting const& mode_setting)
{
    SpinlockLocker locker(m_modeset_lock);
    if (mode_setting.horizontal_active > MAX_VIRTIOGPU_RESOLUTION_WIDTH || mode_setting.vertical_active > MAX_VIRTIOGPU_RESOLUTION_HEIGHT)
        return Error::from_errno(ENOTSUP);
    auto& info = m_display_info;
    info.rect = {
        .x = 0,
        .y = 0,
        .width = (u32)mode_setting.horizontal_active,
        .height = (u32)mode_setting.vertical_active,
    };
    // Reallocate and re-register the front/back buffers to match the new rect.
    TRY(create_framebuffer());
    // All timing fields are zero: paravirtualized hardware has no real CRTC.
    DisplayConnector::ModeSetting mode_set {
        .horizontal_stride = info.rect.width * sizeof(u32),
        .pixel_clock_in_khz = 0, // Note: There's no pixel clock in paravirtualized hardware
        .horizontal_active = info.rect.width,
        .horizontal_front_porch_pixels = 0, // Note: There's no horizontal_front_porch_pixels in paravirtualized hardware
        .horizontal_sync_time_pixels = 0, // Note: There's no horizontal_sync_time_pixels in paravirtualized hardware
        .horizontal_blank_pixels = 0, // Note: There's no horizontal_blank_pixels in paravirtualized hardware
        .vertical_active = info.rect.height,
        .vertical_front_porch_lines = 0, // Note: There's no vertical_front_porch_lines in paravirtualized hardware
        .vertical_sync_time_lines = 0, // Note: There's no vertical_sync_time_lines in paravirtualized hardware
        .vertical_blank_lines = 0, // Note: There's no vertical_blank_lines in paravirtualized hardware
        .horizontal_offset = 0,
        .vertical_offset = 0,
    };
    m_current_mode_setting = mode_set;
    return {};
}
// Falls back to a universally supported 1024x768, 32-bpp mode. All porch,
// sync and blank fields stay zero because paravirtualized hardware has no
// real display timings.
ErrorOr<void> VirtIODisplayConnector::set_safe_mode_setting()
{
    constexpr size_t safe_width = 1024;
    constexpr size_t safe_height = 768;
    DisplayConnector::ModeSetting safe_mode_setting {
        .horizontal_stride = safe_width * sizeof(u32),
        .pixel_clock_in_khz = 0, // Note: There's no pixel clock in paravirtualized hardware
        .horizontal_active = safe_width,
        .horizontal_front_porch_pixels = 0, // Note: There's no horizontal_front_porch_pixels in paravirtualized hardware
        .horizontal_sync_time_pixels = 0, // Note: There's no horizontal_sync_time_pixels in paravirtualized hardware
        .horizontal_blank_pixels = 0, // Note: There's no horizontal_blank_pixels in paravirtualized hardware
        .vertical_active = safe_height,
        .vertical_front_porch_lines = 0, // Note: There's no vertical_front_porch_lines in paravirtualized hardware
        .vertical_sync_time_lines = 0, // Note: There's no vertical_sync_time_lines in paravirtualized hardware
        .vertical_blank_lines = 0, // Note: There's no vertical_blank_lines in paravirtualized hardware
        .horizontal_offset = 0,
        .vertical_offset = 0,
    };
    return set_mode_setting(safe_mode_setting);
}
// Selects which half of the double framebuffer is scanned out: a y-offset of
// 0 means the front (main) buffer, exactly one screen height means the back
// buffer. Any other offset is invalid.
ErrorOr<void> VirtIODisplayConnector::set_y_offset(size_t y)
{
    VERIFY(m_control_lock.is_locked());
    if (y != 0 && y != m_display_info.rect.height)
        return Error::from_errno(EINVAL);
    m_current_buffer = (y == 0) ? &m_main_buffer : &m_back_buffer;
    return {};
}
// Blanking is not implemented for VirtIO GPU scanouts; callers get ENOTIMPL.
ErrorOr<void> VirtIODisplayConnector::unblank()
{
    return Error::from_errno(ENOTIMPL);
}
// Copies userspace/kernel data into the double framebuffer. Offsets in
// [0, m_buffer_size) address the front (main) buffer; offsets in
// [m_buffer_size, 2 * m_buffer_size) address the back buffer. A single write
// may not straddle the boundary between the two buffers.
//
// Returns EOVERFLOW for any out-of-range or boundary-crossing request, and
// the number of bytes written on success.
ErrorOr<size_t> VirtIODisplayConnector::write_to_first_surface(u64 offset, UserOrKernelBuffer const& buffer, size_t length)
{
    VERIFY(m_control_lock.is_locked());
    u64 const total_size = m_buffer_size * 2;
    // Check each operand against the total size before summing so that
    // `offset + length` cannot wrap around for an attacker-controlled u64
    // offset (which would bypass both range checks below and lead to an
    // out-of-bounds write).
    if (offset > total_size || length > total_size || offset + length > total_size)
        return Error::from_errno(EOVERFLOW);
    // Reject writes that start in the front buffer but spill into the back buffer.
    if (offset < m_buffer_size && (offset + length) > (m_buffer_size))
        return Error::from_errno(EOVERFLOW);
    if (offset < m_buffer_size) {
        TRY(buffer.read(m_main_buffer.framebuffer_data + offset, 0, length));
    } else {
        TRY(buffer.read(m_back_buffer.framebuffer_data + offset - m_buffer_size, 0, length));
    }
    return length;
}
// Flushes one dirty rectangle of the selected buffer to the host. If the
// buffer is the one currently scanned out, the image is pushed to the display
// immediately and its pending dirty rect cleared; otherwise the rectangle is
// accumulated (unioned) into the buffer's dirty rect, to be flushed when that
// buffer becomes the displayed one.
// Caller must hold m_flushing_lock; the adapter's operation lock is taken here.
ErrorOr<void> VirtIODisplayConnector::flush_rectangle(size_t buffer_index, FBRect const& rect)
{
    VERIFY(m_flushing_lock.is_locked());
    SpinlockLocker locker(m_graphics_adapter->operation_lock());
    Graphics::VirtIOGPU::Protocol::Rect dirty_rect {
        .x = rect.x,
        .y = rect.y,
        .width = rect.width,
        .height = rect.height
    };
    auto& buffer = buffer_from_index(buffer_index);
    transfer_framebuffer_data_to_host(dirty_rect, buffer);
    if (&buffer == m_current_buffer) {
        // Flushing directly to screen
        flush_displayed_image(dirty_rect, buffer);
        buffer.dirty_rect = {};
    } else {
        if (buffer.dirty_rect.width == 0 || buffer.dirty_rect.height == 0) {
            // No pending dirty area yet; adopt this rect as-is.
            buffer.dirty_rect = dirty_rect;
        } else {
            // Grow the pending dirty rect to the bounding box of the old and
            // new rectangles.
            auto current_dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
            auto current_dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
            buffer.dirty_rect.x = min(buffer.dirty_rect.x, dirty_rect.x);
            buffer.dirty_rect.y = min(buffer.dirty_rect.y, dirty_rect.y);
            buffer.dirty_rect.width = max(current_dirty_right, dirty_rect.x + dirty_rect.width) - buffer.dirty_rect.x;
            buffer.dirty_rect.height = max(current_dirty_bottom, dirty_rect.y + dirty_rect.height) - buffer.dirty_rect.y;
        }
    }
    return {};
}
// Flushes the entire first surface (buffer index 0, full screen rect) to the
// host; same transfer/flush/dirty-rect-union logic as flush_rectangle(), but
// always for the whole display area. Also invoked periodically by the VirtIO
// GPU console's refresh timer.
// Caller must hold m_flushing_lock; the adapter's operation lock is taken here.
ErrorOr<void> VirtIODisplayConnector::flush_first_surface()
{
    VERIFY(m_flushing_lock.is_locked());
    SpinlockLocker locker(m_graphics_adapter->operation_lock());
    Graphics::VirtIOGPU::Protocol::Rect dirty_rect {
        .x = 0,
        .y = 0,
        .width = m_display_info.rect.width,
        .height = m_display_info.rect.height
    };
    auto& buffer = buffer_from_index(0);
    transfer_framebuffer_data_to_host(dirty_rect, buffer);
    if (&buffer == m_current_buffer) {
        // Flushing directly to screen
        flush_displayed_image(dirty_rect, buffer);
        buffer.dirty_rect = {};
    } else {
        if (buffer.dirty_rect.width == 0 || buffer.dirty_rect.height == 0) {
            // No pending dirty area yet; adopt this rect as-is.
            buffer.dirty_rect = dirty_rect;
        } else {
            // Grow the pending dirty rect to the bounding box of the old and
            // new rectangles.
            auto current_dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
            auto current_dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
            buffer.dirty_rect.x = min(buffer.dirty_rect.x, dirty_rect.x);
            buffer.dirty_rect.y = min(buffer.dirty_rect.y, dirty_rect.y);
            buffer.dirty_rect.width = max(current_dirty_right, dirty_rect.x + dirty_rect.width) - buffer.dirty_rect.x;
            buffer.dirty_rect.height = max(current_dirty_bottom, dirty_rect.y + dirty_rect.height) - buffer.dirty_rect.y;
        }
    }
    return {};
}
// Turns the attached kernel console back on (e.g. when leaving graphical
// mode). Requires m_control_lock; the console always exists after
// initialize_console() ran in must_create().
void VirtIODisplayConnector::enable_console()
{
    VERIFY(m_control_lock.is_locked());
    VERIFY(m_console);
    m_console->enable();
}
// Turns the attached kernel console off (e.g. when a userspace compositor
// takes over the display). Requires m_control_lock.
void VirtIODisplayConnector::disable_console()
{
    VERIFY(m_control_lock.is_locked());
    VERIFY(m_console);
    m_console->disable();
}
// Paints every pixel of the buffer opaque black: the first three bytes of
// each 4-byte pixel are set to 0x00 and the fourth to 0xff.
void VirtIODisplayConnector::clear_to_black(Buffer& buffer)
{
    size_t pixel_count = static_cast<size_t>(m_display_info.rect.width) * m_display_info.rect.height;
    u8* pixel = buffer.framebuffer_data;
    for (size_t remaining = pixel_count; remaining > 0; --remaining) {
        pixel[0] = 0x00;
        pixel[1] = 0x00;
        pixel[2] = 0x00;
        pixel[3] = 0xff;
        pixel += 4;
    }
}
// Renders an NTSC-style test card into the buffer so a freshly created
// framebuffer shows something recognizable before the console or userspace
// draws. Layout (by integer ratios of the screen height): top 2/3 is seven
// color bars, the 2/3..3/4 band alternates reversed bars with near-black,
// and the bottom band is calibration blocks plus a grayscale gradient.
void VirtIODisplayConnector::draw_ntsc_test_pattern(Buffer& buffer)
{
    // 4-byte pixel values used by the pattern; index into this table is the
    // `color` computed per pixel below.
    constexpr u8 colors[12][4] = {
        { 0xff, 0xff, 0xff, 0xff }, // White
        { 0x00, 0xff, 0xff, 0xff }, // Primary + Composite colors
        { 0xff, 0xff, 0x00, 0xff },
        { 0x00, 0xff, 0x00, 0xff },
        { 0xff, 0x00, 0xff, 0xff },
        { 0x00, 0x00, 0xff, 0xff },
        { 0xff, 0x00, 0x00, 0xff },
        { 0xba, 0x01, 0x5f, 0xff }, // Dark blue
        { 0x8d, 0x3d, 0x00, 0xff }, // Purple
        { 0x22, 0x22, 0x22, 0xff }, // Shades of gray
        { 0x10, 0x10, 0x10, 0xff },
        { 0x00, 0x00, 0x00, 0xff },
    };
    size_t width = m_display_info.rect.width;
    size_t height = m_display_info.rect.height;
    u8* data = buffer.framebuffer_data;
    // Draw NTSC test card
    for (size_t y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            size_t color = 0;
            if (3 * y < 2 * height) {
                // Top 2/3 of image is 7 vertical stripes of color spectrum
                color = (7 * x) / width;
            } else if (4 * y < 3 * height) {
                // 2/3 mark to 3/4 mark is backwards color spectrum alternating with black
                auto segment = (7 * x) / width;
                color = segment % 2 ? 10 : 6 - segment;
            } else {
                // Bottom band: fixed calibration blocks in x < 20/28 of the
                // width, then a grayscale gradient, then solid black.
                if (28 * x < 5 * width) {
                    color = 8;
                } else if (28 * x < 10 * width) {
                    color = 0;
                } else if (28 * x < 15 * width) {
                    color = 7;
                } else if (28 * x < 20 * width) {
                    color = 10;
                } else if (7 * x < 6 * width) {
                    // Grayscale gradient
                    color = 26 - ((21 * x) / width);
                } else {
                    // Solid black
                    color = 10;
                }
            }
            u8* pixel = &data[4 * (y * width + x)];
            for (int i = 0; i < 4; ++i) {
                pixel[i] = colors[color][i];
            }
        }
    }
    dbgln_if(VIRTIO_DEBUG, "Finish drawing the pattern");
}
// Returns the raw pixel data of whichever buffer is currently scanned out;
// used by the VirtIO GPU console as its drawing target.
u8* VirtIODisplayConnector::framebuffer_data()
{
    return m_current_buffer->framebuffer_data;
}
// (Re)allocates the kernel region that backs both halves of the double
// framebuffer (2 * m_buffer_size) for the current display rect, then
// registers front and back buffers with the VirtIO GPU. Must be re-run on
// every mode change, as the buffer size depends on the resolution.
ErrorOr<void> VirtIODisplayConnector::create_framebuffer()
{
    SpinlockLocker locker(m_graphics_adapter->operation_lock());
    // First delete any existing framebuffers to free the memory first
    m_framebuffer = nullptr;
    m_framebuffer_sink_vmobject = nullptr;
    // Allocate frame buffer for both front and back
    m_buffer_size = calculate_framebuffer_size(m_display_info.rect.width, m_display_info.rect.height);
    auto region_name = TRY(KString::formatted("VirtGPU FrameBuffer #{}", m_scanout_id.value()));
    m_framebuffer = TRY(MM.allocate_kernel_region(m_buffer_size * 2, region_name->view(), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
    // Build a "sink" VMObject whose pages all alias a single physical page.
    // NOTE(review): presumably used to absorb writes from stale userspace
    // mappings of an old framebuffer — confirm against DisplayConnector's
    // mmap/unmap handling, which is not visible in this file.
    auto write_sink_page = TRY(MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No));
    auto num_needed_pages = m_framebuffer->vmobject().page_count();
    NonnullRefPtrVector<Memory::PhysicalPage> pages;
    for (auto i = 0u; i < num_needed_pages; ++i) {
        TRY(pages.try_append(write_sink_page));
    }
    m_framebuffer_sink_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span()));
    // Keep scanning out from the buffer that was last selected via set_buffer.
    m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
    create_buffer(m_main_buffer, 0, m_buffer_size);
    create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);
    return {};
}
// Badge-guarded forwarder: lets only the owning adapter install the EDID
// blob it queried from the device into the base DisplayConnector.
void VirtIODisplayConnector::set_edid_bytes(Badge<VirtIOGraphicsAdapter>, Array<u8, 128> const& edid_bytes)
{
    DisplayConnector::set_edid_bytes(edid_bytes);
}
// Badge-guarded accessor: returns a copy of the cached per-scanout display
// info for the owning adapter.
Graphics::VirtIOGPU::Protocol::DisplayInfoResponse::Display VirtIODisplayConnector::display_information(Badge<VirtIOGraphicsAdapter>)
{
    return m_display_info;
}
// Registers one half of the double framebuffer with the VirtIO GPU, following
// the device's required command sequence (create resource, attach backing,
// set scanout, transfer, flush). Caller must hold the adapter's operation
// lock, since this issues multiple dependent commands.
void VirtIODisplayConnector::create_buffer(Buffer& buffer, size_t framebuffer_offset, size_t framebuffer_size)
{
    VERIFY(m_graphics_adapter->operation_lock().is_locked());
    buffer.framebuffer_offset = framebuffer_offset;
    buffer.framebuffer_data = m_framebuffer->vaddr().as_ptr() + framebuffer_offset;
    // 1. Create BUFFER using VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
    // (a resource_id of 0 means no resource was created yet, so nothing to delete)
    if (buffer.resource_id.value() != 0)
        m_graphics_adapter->delete_resource(buffer.resource_id);
    buffer.resource_id = m_graphics_adapter->create_2d_resource(m_display_info.rect);
    // 2. Attach backing storage using VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
    m_graphics_adapter->ensure_backing_storage(buffer.resource_id, *m_framebuffer, buffer.framebuffer_offset, framebuffer_size);
    // 3. Use VIRTIO_GPU_CMD_SET_SCANOUT to link the framebuffer to a display scanout.
    if (&buffer == m_current_buffer)
        m_graphics_adapter->set_scanout_resource(m_scanout_id, buffer.resource_id, m_display_info.rect);
    // 4. Render our test pattern
    draw_ntsc_test_pattern(buffer);
    // 5. Use VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D to update the host resource from guest memory.
    transfer_framebuffer_data_to_host(m_display_info.rect, buffer);
    // 6. Use VIRTIO_GPU_CMD_RESOURCE_FLUSH to flush the updated resource to the display.
    if (&buffer == m_current_buffer)
        flush_displayed_image(m_display_info.rect, buffer);
    // Make sure we constrain the existing dirty rect (if any)
    if (buffer.dirty_rect.width != 0 || buffer.dirty_rect.height != 0) {
        auto dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
        auto dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
        buffer.dirty_rect.width = min(dirty_right, m_display_info.rect.x + m_display_info.rect.width) - buffer.dirty_rect.x;
        buffer.dirty_rect.height = min(dirty_bottom, m_display_info.rect.y + m_display_info.rect.height) - buffer.dirty_rect.y;
    }
    m_display_info.enabled = 1;
}
// Thin wrapper: asks the adapter to copy the given rect of this buffer's
// resource from guest memory to the host (VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D).
void VirtIODisplayConnector::transfer_framebuffer_data_to_host(Graphics::VirtIOGPU::Protocol::Rect const& rect, Buffer& buffer)
{
    m_graphics_adapter->transfer_framebuffer_data_to_host(m_scanout_id, buffer.resource_id, rect);
}
// Thin wrapper: asks the adapter to flush a dirty rect of this buffer's
// resource for this scanout.
void VirtIODisplayConnector::flush_dirty_window(Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    m_graphics_adapter->flush_dirty_rectangle(m_scanout_id, buffer.resource_id, dirty_rect);
}
// Thin wrapper: asks the adapter to flush the given rect of this buffer's
// resource to the display (VIRTIO_GPU_CMD_RESOURCE_FLUSH).
void VirtIODisplayConnector::flush_displayed_image(Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    m_graphics_adapter->flush_displayed_image(buffer.resource_id, dirty_rect);
}
}

View file

@ -0,0 +1,123 @@
/*
* Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/BinaryBufferWriter.h>
#include <AK/DistinctNumeric.h>
#include <Kernel/Devices/CharacterDevice.h>
#include <Kernel/Graphics/Console/Console.h>
#include <Kernel/Graphics/DisplayConnector.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>
#include <Kernel/Graphics/VirtIOGPU/Protocol.h>
#include <Kernel/Memory/Region.h>
#include <LibEDID/EDID.h>
namespace Kernel::Graphics::VirtIOGPU {
class Console;
}
namespace Kernel {
class VirtIOGraphicsAdapter;
// A DisplayConnector backed by a single scanout of a VirtIO GPU adapter.
// Owns a double (front/back) framebuffer in one kernel region and a kernel
// console that renders into the currently displayed buffer.
class VirtIODisplayConnector final : public DisplayConnector {
    friend class Graphics::VirtIOGPU::Console;
    friend class DeviceManagement;

private:
    // One half of the double framebuffer: its offset/pointer into the shared
    // kernel region, the not-yet-flushed dirty area, and the VirtIO GPU
    // resource id backing it (0 = no resource created yet).
    struct Buffer {
        size_t framebuffer_offset { 0 };
        u8* framebuffer_data { nullptr };
        Graphics::VirtIOGPU::Protocol::Rect dirty_rect {};
        Graphics::VirtIOGPU::ResourceID resource_id { 0 };
    };

public:
    static NonnullRefPtr<VirtIODisplayConnector> must_create(VirtIOGraphicsAdapter& graphics_adapter, Graphics::VirtIOGPU::ScanoutID scanout_id);

    // Badge-guarded entry points reserved for the owning adapter.
    void set_edid_bytes(Badge<VirtIOGraphicsAdapter>, Array<u8, 128> const& edid_bytes);
    void set_safe_mode_setting_after_initialization(Badge<VirtIOGraphicsAdapter>);
    Graphics::VirtIOGPU::Protocol::DisplayInfoResponse::Display display_information(Badge<VirtIOGraphicsAdapter>);

private:
    void initialize_console();
    virtual bool mutable_mode_setting_capable() const override { return true; }
    virtual bool double_framebuffering_capable() const override { return true; }
    virtual bool partial_flush_support() const override { return true; }
    virtual ErrorOr<void> set_mode_setting(ModeSetting const&) override;
    virtual ErrorOr<void> set_safe_mode_setting() override;
    virtual ErrorOr<void> set_y_offset(size_t y) override;
    virtual ErrorOr<void> unblank() override;
    // Note: VirtIO hardware requires a constant refresh to keep the screen in sync to the user.
    virtual bool flush_support() const override { return true; }
    // Note: Paravirtualized hardware doesn't require a defined refresh rate for modesetting.
    virtual bool refresh_rate_support() const override { return false; }
    virtual ErrorOr<size_t> write_to_first_surface(u64 offset, UserOrKernelBuffer const&, size_t length) override;
    virtual ErrorOr<void> flush_first_surface() override;
    virtual ErrorOr<void> flush_rectangle(size_t buffer_index, FBRect const& rect) override;
    virtual void enable_console() override;
    virtual void disable_console() override;

private:
    VirtIODisplayConnector(VirtIOGraphicsAdapter& graphics_adapter, Graphics::VirtIOGPU::ScanoutID scanout_id);
    // NOTE(review): the query_* methods are declared here but their
    // definitions are not in the visible part of this commit — confirm they
    // are defined (or intentionally pending) before relying on them.
    void query_display_information();
    ErrorOr<void> query_edid_from_virtio_adapter();
    void query_display_edid();
    void flush_dirty_window(Graphics::VirtIOGPU::Protocol::Rect const&, Buffer&);
    void transfer_framebuffer_data_to_host(Graphics::VirtIOGPU::Protocol::Rect const&, Buffer&);
    void flush_displayed_image(Graphics::VirtIOGPU::Protocol::Rect const&, Buffer&);
    // Basic 2D framebuffer methods
    static size_t calculate_framebuffer_size(size_t width, size_t height)
    {
        // VirtIO resources can only map on page boundaries!
        return Memory::page_round_up(sizeof(u32) * width * height).value();
    }
    u8* framebuffer_data();
    void draw_ntsc_test_pattern(Buffer&);
    void clear_to_black(Buffer&);
    ErrorOr<void> create_framebuffer();
    void create_buffer(Buffer&, size_t, size_t);
    void set_buffer(int);
    static bool is_valid_buffer_index(int buffer_index)
    {
        return buffer_index == 0 || buffer_index == 1;
    }
    Buffer& buffer_from_index(int buffer_index)
    {
        return buffer_index == 0 ? m_main_buffer : m_back_buffer;
    }
    Buffer& current_buffer() const { return *m_current_buffer; }

    // Member data
    // Context used for kernel operations (e.g. flushing resources to scanout)
    Graphics::VirtIOGPU::ContextID m_kernel_context_id;
    NonnullRefPtr<VirtIOGraphicsAdapter> m_graphics_adapter;
    RefPtr<Graphics::Console> m_console;
    // Cached copy of the device-reported display info for this scanout.
    Graphics::VirtIOGPU::Protocol::DisplayInfoResponse::Display m_display_info {};
    Graphics::VirtIOGPU::ScanoutID m_scanout_id;

    // 2D framebuffer Member data
    // Size of ONE buffer; the backing region is twice this (front + back).
    size_t m_buffer_size { 0 };
    Buffer* m_current_buffer { nullptr };
    Atomic<int, AK::memory_order_relaxed> m_last_set_buffer_index { 0 };
    Buffer m_main_buffer;
    Buffer m_back_buffer;
    OwnPtr<Memory::Region> m_framebuffer;
    RefPtr<Memory::VMObject> m_framebuffer_sink_vmobject;
    constexpr static size_t NUM_TRANSFER_REGION_PAGES = 256;
};
}

View file

@ -1,402 +0,0 @@
/*
* Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
* Copyright (c) 2022, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>
#include <LibC/sys/ioctl_numbers.h>
namespace Kernel::Graphics::VirtIOGPU {
RefPtr<GraphicsAdapter> FramebufferDevice::adapter() const
{
auto adapter = m_graphics_adapter.strong_ref();
// FIXME: Propagate error gracefully
VERIFY(adapter);
return static_cast<GraphicsAdapter&>(*adapter);
}
ErrorOr<size_t> FramebufferDevice::buffer_length(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
SpinlockLocker locker(m_resolution_lock);
return display_info().rect.width * display_info().rect.height * 4;
}
ErrorOr<size_t> FramebufferDevice::pitch(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
SpinlockLocker locker(m_resolution_lock);
return display_info().rect.width * 4;
}
ErrorOr<size_t> FramebufferDevice::height(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
SpinlockLocker locker(m_resolution_lock);
return display_info().rect.height;
}
ErrorOr<size_t> FramebufferDevice::width(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
SpinlockLocker locker(m_resolution_lock);
return display_info().rect.width;
}
ErrorOr<size_t> FramebufferDevice::vertical_offset(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
return 0;
}
ErrorOr<bool> FramebufferDevice::vertical_offsetted(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
return false;
}
ErrorOr<void> FramebufferDevice::set_head_resolution(size_t head, size_t width, size_t height, size_t)
{
// Note: This class doesn't support multihead setup (yet!).
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
if (width > MAX_VIRTIOGPU_RESOLUTION_WIDTH || height > MAX_VIRTIOGPU_RESOLUTION_HEIGHT)
return Error::from_errno(ENOTSUP);
auto& info = display_info();
info.rect = {
.x = 0,
.y = 0,
.width = (u32)width,
.height = (u32)height,
};
// FIXME: Would be nice to be able to return ErrorOr here.
TRY(create_framebuffer());
return {};
}
ErrorOr<void> FramebufferDevice::set_head_buffer(size_t, bool)
{
return Error::from_errno(ENOTSUP);
}
ErrorOr<void> FramebufferDevice::flush_head_buffer(size_t)
{
// Note: This class doesn't support flushing.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally reach this code, assert.
VERIFY_NOT_REACHED();
}
ErrorOr<void> FramebufferDevice::flush_rectangle(size_t buffer_index, FBRect const& rect)
{
MutexLocker locker(adapter()->operation_lock());
Protocol::Rect dirty_rect {
.x = rect.x,
.y = rect.y,
.width = rect.width,
.height = rect.height
};
// FIXME: Find a better ErrorOr<void> here.
if (!m_are_writes_active)
return Error::from_errno(EIO);
auto& buffer = buffer_from_index(buffer_index);
transfer_framebuffer_data_to_host(dirty_rect, buffer);
if (&buffer == m_current_buffer) {
// Flushing directly to screen
flush_displayed_image(dirty_rect, buffer);
buffer.dirty_rect = {};
} else {
if (buffer.dirty_rect.width == 0 || buffer.dirty_rect.height == 0) {
buffer.dirty_rect = dirty_rect;
} else {
auto current_dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
auto current_dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
buffer.dirty_rect.x = min(buffer.dirty_rect.x, dirty_rect.x);
buffer.dirty_rect.y = min(buffer.dirty_rect.y, dirty_rect.y);
buffer.dirty_rect.width = max(current_dirty_right, dirty_rect.x + dirty_rect.width) - buffer.dirty_rect.x;
buffer.dirty_rect.height = max(current_dirty_bottom, dirty_rect.y + dirty_rect.height) - buffer.dirty_rect.y;
}
}
return {};
}
ErrorOr<ByteBuffer> FramebufferDevice::get_edid(size_t head) const
{
// Note: This FramebufferDevice class doesn't support multihead setup.
// We take care to verify this at the GenericFramebufferDevice::ioctl method
// so if we happen to accidentally have a value different than 0, assert.
VERIFY(head == 0);
return adapter()->get_edid(m_scanout.value());
}
FramebufferDevice::FramebufferDevice(GraphicsAdapter const& adapter, ScanoutID scanout)
: GenericFramebufferDevice(adapter)
, m_scanout(scanout)
{
if (display_info().enabled) {
// FIXME: This should be in a place where we can handle allocation failures.
auto result = create_framebuffer();
VERIFY(!result.is_error());
}
}
FramebufferDevice::~FramebufferDevice() = default;
ErrorOr<void> FramebufferDevice::create_framebuffer()
{
MutexLocker locker(adapter()->operation_lock());
// First delete any existing framebuffers to free the memory first
m_framebuffer = nullptr;
m_framebuffer_sink_vmobject = nullptr;
// Allocate frame buffer for both front and back
auto& info = display_info();
m_buffer_size = calculate_framebuffer_size(info.rect.width, info.rect.height);
auto region_name = TRY(KString::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()));
m_framebuffer = TRY(MM.allocate_kernel_region(m_buffer_size * 2, region_name->view(), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
auto write_sink_page = TRY(MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No));
auto num_needed_pages = m_framebuffer->vmobject().page_count();
NonnullRefPtrVector<Memory::PhysicalPage> pages;
for (auto i = 0u; i < num_needed_pages; ++i) {
TRY(pages.try_append(write_sink_page));
}
m_framebuffer_sink_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span()));
m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
create_buffer(m_main_buffer, 0, m_buffer_size);
create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);
return {};
}
// (Re)creates the host-side 2D resource for |buffer| and links it to this
// device's framebuffer memory at |framebuffer_offset|.
// Follows the VirtIO GPU command sequence: create resource, attach guest
// backing pages, set scanout (only for the currently displayed buffer), then
// draw, upload and flush a test pattern so the display shows something.
void FramebufferDevice::create_buffer(Buffer& buffer, size_t framebuffer_offset, size_t framebuffer_size)
{
    buffer.framebuffer_offset = framebuffer_offset;
    buffer.framebuffer_data = m_framebuffer->vaddr().as_ptr() + framebuffer_offset;

    auto& info = display_info();

    // 1. Create BUFFER using VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
    // A non-zero resource id means this buffer was created before (e.g. after
    // a mode change), so release the stale host resource first.
    if (buffer.resource_id.value() != 0)
        adapter()->delete_resource(buffer.resource_id);
    buffer.resource_id = adapter()->create_2d_resource(info.rect);

    // 2. Attach backing storage using VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
    adapter()->ensure_backing_storage(buffer.resource_id, *m_framebuffer, buffer.framebuffer_offset, framebuffer_size);
    // 3. Use VIRTIO_GPU_CMD_SET_SCANOUT to link the framebuffer to a display scanout.
    if (&buffer == m_current_buffer)
        adapter()->set_scanout_resource(m_scanout.value(), buffer.resource_id, info.rect);
    // 4. Render our test pattern
    draw_ntsc_test_pattern(buffer);
    // 5. Use VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D to update the host resource from guest memory.
    transfer_framebuffer_data_to_host(info.rect, buffer);
    // 6. Use VIRTIO_GPU_CMD_RESOURCE_FLUSH to flush the updated resource to the display.
    if (&buffer == m_current_buffer)
        flush_displayed_image(info.rect, buffer);

    // Make sure we constrain the existing dirty rect (if any)
    // so it never extends past the (possibly smaller) new display rect.
    if (buffer.dirty_rect.width != 0 || buffer.dirty_rect.height != 0) {
        auto dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
        auto dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
        buffer.dirty_rect.width = min(dirty_right, info.rect.x + info.rect.width) - buffer.dirty_rect.x;
        buffer.dirty_rect.height = min(dirty_bottom, info.rect.y + info.rect.height) - buffer.dirty_rect.y;
    }

    info.enabled = 1;
}
// Returns the adapter's cached display information (rect, enabled flag, ...)
// for this device's scanout.
Protocol::DisplayInfoResponse::Display const& FramebufferDevice::display_info() const
{
    return adapter()->display_info(m_scanout);
}
// Mutable variant; used when this device needs to update fields (such as
// 'enabled' in create_buffer()) on the adapter's cached display information.
Protocol::DisplayInfoResponse::Display& FramebufferDevice::display_info()
{
    return adapter()->display_info(m_scanout);
}
// Uploads |rect| of the guest buffer to its host-side resource
// (VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D).
void FramebufferDevice::transfer_framebuffer_data_to_host(Protocol::Rect const& rect, Buffer& buffer)
{
    adapter()->transfer_framebuffer_data_to_host(m_scanout, buffer.resource_id, rect);
}
// Asks the adapter to flush |dirty_rect| of this buffer's resource on our scanout.
void FramebufferDevice::flush_dirty_window(Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    adapter()->flush_dirty_rectangle(m_scanout, buffer.resource_id, dirty_rect);
}
// Flushes |dirty_rect| of this buffer's resource to the display
// (VIRTIO_GPU_CMD_RESOURCE_FLUSH).
void FramebufferDevice::flush_displayed_image(Protocol::Rect const& dirty_rect, Buffer& buffer)
{
    adapter()->flush_displayed_image(buffer.resource_id, dirty_rect);
}
void FramebufferDevice::set_buffer(int buffer_index)
{
auto& buffer = buffer_index == 0 ? m_main_buffer : m_back_buffer;
MutexLocker locker(adapter()->operation_lock());
if (&buffer == m_current_buffer)
return;
m_current_buffer = &buffer;
adapter()->set_scanout_resource(m_scanout.value(), buffer.resource_id, display_info().rect);
adapter()->flush_displayed_image(buffer.resource_id, buffer.dirty_rect); // QEMU SDL backend requires this (as per spec)
buffer.dirty_rect = {};
}
// Maps the framebuffer into userspace. Only a single shared, zero-offset
// mapping is allowed at a time. While writes are deactivated (e.g. while a
// kernel console owns the display) the caller gets the "sink" VM object, whose
// pages all alias one discard page, instead of the real framebuffer.
ErrorOr<Memory::Region*> FramebufferDevice::mmap(Process& process, OpenFileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
{
    TRY(process.require_promise(Pledge::video));
    // A private (copy-on-write) mapping could never reach the display.
    if (!shared)
        return ENODEV;
    if (offset != 0 || !m_framebuffer)
        return ENXIO;
    if (range.size() > m_framebuffer->size())
        return EOVERFLOW;

    // We only allow one process to map the region
    if (m_userspace_mmap_region)
        return ENOMEM;

    RefPtr<Memory::VMObject> vmobject;
    if (m_are_writes_active) {
        // Writes are live: map (a clone of) the real framebuffer pages.
        vmobject = TRY(m_framebuffer->vmobject().try_clone());
    } else {
        // Writes are currently being discarded: hand out the sink object.
        vmobject = m_framebuffer_sink_vmobject;
        if (vmobject.is_null())
            return ENOMEM;
    }

    m_userspace_mmap_region = TRY(process.address_space().allocate_region_with_vmobject(
        range,
        vmobject.release_nonnull(),
        0,
        "VirtIOGPU Framebuffer",
        prot,
        shared));
    return m_userspace_mmap_region.unsafe_ptr();
}
// Stops forwarding userspace writes to the display: the userspace mapping (if
// any) is remapped onto a clone of the sink VM object so further writes are
// silently discarded, then the main buffer is displayed and cleared to black.
void FramebufferDevice::deactivate_writes()
{
    m_are_writes_active = false;
    if (m_userspace_mmap_region) {
        auto* region = m_userspace_mmap_region.unsafe_ptr();
        auto maybe_vm_object = m_framebuffer_sink_vmobject->try_clone();
        // FIXME: Would be nice to be able to return a ErrorOr<void> here.
        VERIFY(!maybe_vm_object.is_error());
        region->set_vmobject(maybe_vm_object.release_value());
        region->remap();
    }
    set_buffer(0);
    clear_to_black(buffer_from_index(0));
}
void FramebufferDevice::activate_writes()
{
m_are_writes_active = true;
auto last_set_buffer_index = m_last_set_buffer_index.load();
if (m_userspace_mmap_region) {
auto* region = m_userspace_mmap_region.unsafe_ptr();
region->set_vmobject(m_framebuffer->vmobject());
region->remap();
}
set_buffer(last_set_buffer_index);
}
// Paints every pixel of |buffer| black, keeping the fourth byte of each
// 4-byte pixel opaque (0xff).
void FramebufferDevice::clear_to_black(Buffer& buffer)
{
    auto& info = display_info();
    size_t pixel_count = static_cast<size_t>(info.rect.width) * info.rect.height;
    u8* pixel_bytes = buffer.framebuffer_data;
    for (size_t pixel = 0; pixel < pixel_count; ++pixel) {
        u8* current_pixel = &pixel_bytes[4 * pixel];
        current_pixel[0] = 0x00;
        current_pixel[1] = 0x00;
        current_pixel[2] = 0x00;
        current_pixel[3] = 0xff;
    }
}
// Fills |buffer| with an NTSC-style test card (color bars on top, a reversed
// bar pattern below, then fixed blocks and a grayscale gradient at the
// bottom) so a freshly created resource shows a recognizable image.
void FramebufferDevice::draw_ntsc_test_pattern(Buffer& buffer)
{
    // Palette of 4-byte pixel values, in the same byte order the framebuffer
    // uses (see clear_to_black(): byte 3 is the always-opaque channel).
    constexpr u8 colors[12][4] = {
        { 0xff, 0xff, 0xff, 0xff }, // White
        { 0x00, 0xff, 0xff, 0xff }, // Primary + Composite colors
        { 0xff, 0xff, 0x00, 0xff },
        { 0x00, 0xff, 0x00, 0xff },
        { 0xff, 0x00, 0xff, 0xff },
        { 0x00, 0x00, 0xff, 0xff },
        { 0xff, 0x00, 0x00, 0xff },
        { 0xba, 0x01, 0x5f, 0xff }, // Dark blue
        { 0x8d, 0x3d, 0x00, 0xff }, // Purple
        { 0x22, 0x22, 0x22, 0xff }, // Shades of gray
        { 0x10, 0x10, 0x10, 0xff },
        { 0x00, 0x00, 0x00, 0xff },
    };
    auto& info = display_info();
    size_t width = info.rect.width;
    size_t height = info.rect.height;
    u8* data = buffer.framebuffer_data;
    // Draw NTSC test card
    for (size_t y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            // Index into colors[] for this pixel.
            size_t color = 0;
            if (3 * y < 2 * height) {
                // Top 2/3 of image is 7 vertical stripes of color spectrum
                color = (7 * x) / width;
            } else if (4 * y < 3 * height) {
                // 2/3 mark to 3/4 mark is backwards color spectrum alternating with black
                auto segment = (7 * x) / width;
                color = segment % 2 ? 10 : 6 - segment;
            } else {
                // Bottom quarter: four fixed-color blocks (each 5/28 of the
                // width), then a grayscale gradient, then solid black.
                if (28 * x < 5 * width) {
                    color = 8;
                } else if (28 * x < 10 * width) {
                    color = 0;
                } else if (28 * x < 15 * width) {
                    color = 7;
                } else if (28 * x < 20 * width) {
                    color = 10;
                } else if (7 * x < 6 * width) {
                    // Grayscale gradient
                    color = 26 - ((21 * x) / width);
                } else {
                    // Solid black
                    color = 10;
                }
            }
            // Copy the 4-byte palette entry into the pixel.
            u8* pixel = &data[4 * (y * width + x)];
            for (int i = 0; i < 4; ++i) {
                pixel[i] = colors[color][i];
            }
        }
    }
    dbgln_if(VIRTIO_DEBUG, "Finish drawing the pattern");
}
// Returns the guest virtual address of the pixel data for the buffer that is
// currently being displayed.
u8* FramebufferDevice::framebuffer_data()
{
    return m_current_buffer->framebuffer_data;
}
}

View file

@ -1,110 +0,0 @@
/*
* Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/Queue.h>
#include <Kernel/Graphics/FramebufferDevice.h>
#include <Kernel/Graphics/GenericGraphicsAdapter.h>
#include <Kernel/Graphics/VirtIOGPU/Protocol.h>
namespace Kernel::Graphics::VirtIOGPU {
class GraphicsAdapter;
// Framebuffer device backed by a single VirtIO GPU scanout. Owns a
// double-buffered guest framebuffer (main + back buffer) that is mirrored to
// host-side VirtIO GPU resources via adapter commands.
class FramebufferDevice final : public GenericFramebufferDevice {
    friend class Console;

    // Bookkeeping for one of the two buffers (main/back).
    struct Buffer {
        size_t framebuffer_offset { 0 };  // Byte offset of this buffer inside m_framebuffer.
        u8* framebuffer_data { nullptr }; // Guest virtual address of this buffer's pixels.
        Protocol::Rect dirty_rect {};     // Area that still needs flushing to the host.
        ResourceID resource_id { 0 };     // Host-side resource id; 0 means not created yet.
    };

public:
    FramebufferDevice(GraphicsAdapter const&, ScanoutID);
    virtual ~FramebufferDevice() override;

    // Nothing to do here; buffer setup happens in create_framebuffer().
    virtual ErrorOr<void> try_to_initialize() override { return {}; }

    // Called when a kernel console takes over / releases the display.
    virtual void deactivate_writes() override;
    virtual void activate_writes() override;

    size_t width() const { return display_info().rect.width; }
    size_t height() const { return display_info().rect.height; }
    size_t pitch() const { return display_info().rect.width * 4; } // 4 bytes per pixel.

    static size_t calculate_framebuffer_size(size_t width, size_t height)
    {
        // VirtIO resources can only map on page boundaries!
        return Memory::page_round_up(sizeof(u32) * width * height).value();
    }
    // Pixel data of the currently displayed buffer.
    u8* framebuffer_data();

private:
    // ^GenericFramebufferDevice
    virtual bool multihead_support() const override { return false; }
    virtual bool flushing_support() const override { return false; }
    virtual bool partial_flushing_support() const override { return true; }
    virtual size_t heads_count() const override { return 1; }
    virtual ErrorOr<size_t> buffer_length(size_t head) const override;
    virtual ErrorOr<size_t> pitch(size_t head) const override;
    virtual ErrorOr<size_t> height(size_t head) const override;
    virtual ErrorOr<size_t> width(size_t head) const override;
    virtual ErrorOr<size_t> vertical_offset(size_t head) const override;
    virtual ErrorOr<bool> vertical_offsetted(size_t head) const override;
    virtual ErrorOr<void> set_head_resolution(size_t head, size_t width, size_t height, size_t pitch) override;
    virtual ErrorOr<void> set_head_buffer(size_t head, bool second_buffer) override;
    virtual ErrorOr<void> flush_head_buffer(size_t head) override;
    virtual ErrorOr<void> flush_rectangle(size_t head, FBRect const&) override;
    virtual ErrorOr<ByteBuffer> get_edid(size_t head) const override;

    // Per-buffer VirtIO GPU command helpers.
    void flush_dirty_window(Protocol::Rect const&, Buffer&);
    void transfer_framebuffer_data_to_host(Protocol::Rect const&, Buffer&);
    void flush_displayed_image(Protocol::Rect const&, Buffer&);

    void draw_ntsc_test_pattern(Buffer&);

    Protocol::DisplayInfoResponse::Display const& display_info() const;
    Protocol::DisplayInfoResponse::Display& display_info();

    void clear_to_black(Buffer&);

    ErrorOr<void> create_framebuffer();
    void create_buffer(Buffer&, size_t, size_t);
    void set_buffer(int);

    virtual ErrorOr<Memory::Region*> mmap(Process&, OpenFileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;

    static bool is_valid_buffer_index(int buffer_index)
    {
        return buffer_index == 0 || buffer_index == 1;
    }

    Buffer& buffer_from_index(int buffer_index)
    {
        return buffer_index == 0 ? m_main_buffer : m_back_buffer;
    }
    Buffer& current_buffer() const { return *m_current_buffer; }

    RefPtr<GraphicsAdapter> adapter() const;

    const ScanoutID m_scanout;            // Which of the adapter's scanouts we drive.
    Buffer* m_current_buffer { nullptr }; // Points at m_main_buffer or m_back_buffer.
    Atomic<int, AK::memory_order_relaxed> m_last_set_buffer_index { 0 };
    Buffer m_main_buffer;
    Buffer m_back_buffer;
    OwnPtr<Memory::Region> m_framebuffer; // Guest memory holding both buffers back-to-back.
    RefPtr<Memory::VMObject> m_framebuffer_sink_vmobject; // Write sink used while writes are deactivated.
    size_t m_buffer_size { 0 };           // Size in bytes of a single buffer (half of m_framebuffer).
    bool m_are_writes_active { true };
    // FIXME: This needs to be cleaned up if the WindowServer exits while we are in a tty
    WeakPtr<Memory::Region> m_userspace_mmap_region;
};
}

View file

@ -13,15 +13,15 @@
#include <Kernel/Random.h>
#include <LibC/sys/ioctl_numbers.h>
namespace Kernel::Graphics::VirtIOGPU {
namespace Kernel {
GPU3DDevice::PerContextState::PerContextState(ContextID context_id, OwnPtr<Memory::Region> transfer_buffer_region)
VirtIOGPU3DDevice::PerContextState::PerContextState(Graphics::VirtIOGPU::ContextID context_id, OwnPtr<Memory::Region> transfer_buffer_region)
: m_context_id(context_id)
, m_transfer_buffer_region(move(transfer_buffer_region))
{
}
NonnullRefPtr<GPU3DDevice> GPU3DDevice::must_create(GraphicsAdapter& adapter)
NonnullRefPtr<VirtIOGPU3DDevice> VirtIOGPU3DDevice::must_create(VirtIOGraphicsAdapter const& adapter)
{
// Setup memory transfer region
auto region_result = MM.allocate_kernel_region(
@ -30,25 +30,25 @@ NonnullRefPtr<GPU3DDevice> GPU3DDevice::must_create(GraphicsAdapter& adapter)
Memory::Region::Access::ReadWrite,
AllocationStrategy::AllocateNow);
VERIFY(!region_result.is_error());
auto device = MUST(DeviceManagement::try_create_device<VirtIOGPU::GPU3DDevice>(adapter, region_result.release_value()));
auto device = MUST(DeviceManagement::try_create_device<VirtIOGPU3DDevice>(adapter, region_result.release_value()));
return device;
}
GPU3DDevice::GPU3DDevice(GraphicsAdapter& graphics_adapter, NonnullOwnPtr<Memory::Region> transfer_buffer_region)
VirtIOGPU3DDevice::VirtIOGPU3DDevice(VirtIOGraphicsAdapter const& graphics_adapter, NonnullOwnPtr<Memory::Region> transfer_buffer_region)
: CharacterDevice(28, 0)
, m_graphics_adapter(graphics_adapter)
, m_transfer_buffer_region(move(transfer_buffer_region))
{
m_kernel_context_id = m_graphics_adapter.create_context();
m_kernel_context_id = m_graphics_adapter->create_context();
}
void GPU3DDevice::detach(OpenFileDescription& description)
void VirtIOGPU3DDevice::detach(OpenFileDescription& description)
{
m_context_state_lookup.remove(&description);
CharacterDevice::detach(description);
}
ErrorOr<RefPtr<GPU3DDevice::PerContextState>> GPU3DDevice::get_context_for_description(OpenFileDescription& description)
ErrorOr<RefPtr<VirtIOGPU3DDevice::PerContextState>> VirtIOGPU3DDevice::get_context_for_description(OpenFileDescription& description)
{
auto res = m_context_state_lookup.get(&description);
if (!res.has_value())
@ -56,16 +56,16 @@ ErrorOr<RefPtr<GPU3DDevice::PerContextState>> GPU3DDevice::get_context_for_descr
return res.value();
}
ErrorOr<void> GPU3DDevice::ioctl(OpenFileDescription& description, unsigned request, Userspace<void*> arg)
ErrorOr<void> VirtIOGPU3DDevice::ioctl(OpenFileDescription& description, unsigned request, Userspace<void*> arg)
{
// TODO: We really should have ioctls for destroying resources as well
switch (request) {
case VIRGL_IOCTL_CREATE_CONTEXT: {
if (m_context_state_lookup.contains(&description))
return EEXIST;
MutexLocker locker(m_graphics_adapter.operation_lock());
SpinlockLocker locker(m_graphics_adapter->operation_lock());
// TODO: Delete the context if it fails to be set in m_context_state_lookup
auto context_id = m_graphics_adapter.create_context();
auto context_id = m_graphics_adapter->create_context();
RefPtr<PerContextState> per_context_state = TRY(PerContextState::try_create(context_id));
TRY(m_context_state_lookup.try_set(&description, per_context_state));
return {};
@ -92,10 +92,10 @@ ErrorOr<void> GPU3DDevice::ioctl(OpenFileDescription& description, unsigned requ
}
case VIRGL_IOCTL_SUBMIT_CMD: {
auto context_id = TRY(get_context_for_description(description))->context_id();
MutexLocker locker(m_graphics_adapter.operation_lock());
SpinlockLocker locker(m_graphics_adapter->operation_lock());
auto user_command_buffer = static_ptr_cast<VirGLCommandBuffer const*>(arg);
auto command_buffer = TRY(copy_typed_from_user(user_command_buffer));
m_graphics_adapter.submit_command_buffer(context_id, [&](Bytes buffer) {
m_graphics_adapter->submit_command_buffer(context_id, [&](Bytes buffer) {
auto num_bytes = command_buffer.num_elems * sizeof(u32);
VERIFY(num_bytes <= buffer.size());
MUST(copy_from_user(buffer.data(), command_buffer.data, num_bytes));
@ -108,8 +108,8 @@ ErrorOr<void> GPU3DDevice::ioctl(OpenFileDescription& description, unsigned requ
auto user_spec = static_ptr_cast<VirGL3DResourceSpec const*>(arg);
VirGL3DResourceSpec spec = TRY(copy_typed_from_user(user_spec));
Protocol::Resource3DSpecification const resource_spec = {
.target = static_cast<Protocol::Gallium::PipeTextureTarget>(spec.target),
Graphics::VirtIOGPU::Protocol::Resource3DSpecification const resource_spec = {
.target = static_cast<Graphics::VirtIOGPU::Protocol::Gallium::PipeTextureTarget>(spec.target),
.format = spec.format,
.bind = spec.bind,
.width = spec.width,
@ -121,10 +121,10 @@ ErrorOr<void> GPU3DDevice::ioctl(OpenFileDescription& description, unsigned requ
.flags = spec.flags,
.padding = 0,
};
MutexLocker locker(m_graphics_adapter.operation_lock());
auto resource_id = m_graphics_adapter.create_3d_resource(resource_spec).value();
m_graphics_adapter.attach_resource_to_context(resource_id, per_context_state->context_id());
m_graphics_adapter.ensure_backing_storage(resource_id, per_context_state->transfer_buffer_region(), 0, NUM_TRANSFER_REGION_PAGES * PAGE_SIZE);
SpinlockLocker locker(m_graphics_adapter->operation_lock());
auto resource_id = m_graphics_adapter->create_3d_resource(resource_spec).value();
m_graphics_adapter->attach_resource_to_context(resource_id, per_context_state->context_id());
m_graphics_adapter->ensure_backing_storage(resource_id, per_context_state->transfer_buffer_region(), 0, NUM_TRANSFER_REGION_PAGES * PAGE_SIZE);
spec.created_resource_id = resource_id;
// FIXME: We should delete the resource we just created if we fail to copy the resource id out
return copy_to_user(static_ptr_cast<VirGL3DResourceSpec*>(arg), &spec);

View file

@ -9,7 +9,6 @@
#include <AK/DistinctNumeric.h>
#include <Kernel/Devices/CharacterDevice.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/Protocol.h>
namespace Kernel::Graphics::VirtIOGPU {
@ -87,18 +86,23 @@ union ClearType {
u32 value;
};
class GPU3DDevice : public CharacterDevice {
friend class Kernel::DeviceManagement;
}
namespace Kernel {
class VirtIOGraphicsAdapter;
class VirtIOGPU3DDevice : public CharacterDevice {
friend class DeviceManagement;
public:
static NonnullRefPtr<GPU3DDevice> must_create(GraphicsAdapter&);
static NonnullRefPtr<VirtIOGPU3DDevice> must_create(VirtIOGraphicsAdapter const&);
private:
GPU3DDevice(GraphicsAdapter& graphics_adapter, NonnullOwnPtr<Memory::Region> transfer_buffer_region);
VirtIOGPU3DDevice(VirtIOGraphicsAdapter const& graphics_adapter, NonnullOwnPtr<Memory::Region> transfer_buffer_region);
class PerContextState : public RefCounted<PerContextState> {
public:
static ErrorOr<RefPtr<PerContextState>> try_create(ContextID context_id)
static ErrorOr<RefPtr<PerContextState>> try_create(Graphics::VirtIOGPU::ContextID context_id)
{
auto region_result = TRY(MM.allocate_kernel_region(
NUM_TRANSFER_REGION_PAGES * PAGE_SIZE,
@ -107,13 +111,13 @@ private:
AllocationStrategy::AllocateNow));
return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PerContextState(context_id, move(region_result))));
}
ContextID context_id() { return m_context_id; }
Graphics::VirtIOGPU::ContextID context_id() { return m_context_id; }
Memory::Region& transfer_buffer_region() { return *m_transfer_buffer_region; }
private:
PerContextState() = delete;
explicit PerContextState(ContextID context_id, OwnPtr<Memory::Region> transfer_buffer_region);
ContextID m_context_id;
explicit PerContextState(Graphics::VirtIOGPU::ContextID context_id, OwnPtr<Memory::Region> transfer_buffer_region);
Graphics::VirtIOGPU::ContextID m_context_id;
OwnPtr<Memory::Region> m_transfer_buffer_region;
};
@ -129,9 +133,9 @@ private:
private:
ErrorOr<RefPtr<PerContextState>> get_context_for_description(OpenFileDescription&);
Kernel::Graphics::VirtIOGPU::GraphicsAdapter& m_graphics_adapter;
NonnullRefPtr<VirtIOGraphicsAdapter> m_graphics_adapter;
// Context used for kernel operations (e.g. flushing resources to scanout)
ContextID m_kernel_context_id;
Graphics::VirtIOGPU::ContextID m_kernel_context_id;
HashMap<OpenFileDescription*, RefPtr<PerContextState>> m_context_state_lookup;
// Memory management for backing buffers
NonnullOwnPtr<Memory::Region> m_transfer_buffer_region;

View file

@ -11,67 +11,52 @@
#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/DisplayConnector.h>
#include <Kernel/Graphics/VirtIOGPU/GPU3DDevice.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>
namespace Kernel::Graphics::VirtIOGPU {
namespace Kernel {
#define DEVICE_EVENTS_READ 0x0
#define DEVICE_EVENTS_CLEAR 0x4
#define DEVICE_NUM_SCANOUTS 0x8
NonnullRefPtr<GraphicsAdapter> GraphicsAdapter::initialize(PCI::DeviceIdentifier const& device_identifier)
NonnullRefPtr<VirtIOGraphicsAdapter> VirtIOGraphicsAdapter::initialize(PCI::DeviceIdentifier const& device_identifier)
{
VERIFY(device_identifier.hardware_id().vendor_id == PCI::VendorID::VirtIO);
auto adapter = adopt_ref(*new GraphicsAdapter(device_identifier));
// Setup memory transfer region
auto scratch_space_region = MUST(MM.allocate_contiguous_kernel_region(
32 * PAGE_SIZE,
"VirtGPU Scratch Space",
Memory::Region::Access::ReadWrite));
auto adapter = adopt_ref(*new (nothrow) VirtIOGraphicsAdapter(device_identifier, move(scratch_space_region)));
adapter->initialize();
MUST(adapter->initialize_adapter());
return adapter;
}
GraphicsAdapter::GraphicsAdapter(PCI::DeviceIdentifier const& device_identifier)
ErrorOr<void> VirtIOGraphicsAdapter::initialize_adapter()
{
VERIFY(m_num_scanouts <= VIRTIO_GPU_MAX_SCANOUTS);
for (size_t index = 0; index < m_num_scanouts; index++) {
auto display_connector = VirtIODisplayConnector::must_create(*this, index);
m_scanouts[index].display_connector = display_connector;
MUST(query_and_set_edid(index, *display_connector));
display_connector->set_safe_mode_setting_after_initialization({});
}
return {};
}
VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(PCI::DeviceIdentifier const& device_identifier, NonnullOwnPtr<Memory::Region> scratch_space_region)
: VirtIO::Device(device_identifier)
, m_scratch_space(move(scratch_space_region))
{
auto region_or_error = MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Memory::Region::Access::ReadWrite);
if (region_or_error.is_error())
TODO();
m_scratch_space = region_or_error.release_value();
}
void GraphicsAdapter::initialize_framebuffer_devices()
void VirtIOGraphicsAdapter::initialize()
{
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Initializing framebuffer devices");
VERIFY(!m_created_framebuffer_devices);
create_framebuffer_devices();
m_created_framebuffer_devices = true;
GraphicsManagement::the().set_console(*default_console());
}
void GraphicsAdapter::enable_consoles()
{
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Enabling consoles");
for_each_framebuffer([&](auto& framebuffer, auto& console) {
framebuffer.deactivate_writes();
console.enable();
return IterationDecision::Continue;
});
}
void GraphicsAdapter::disable_consoles()
{
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Disabling consoles");
for_each_framebuffer([&](auto& framebuffer, auto& console) {
console.disable();
framebuffer.activate_writes();
return IterationDecision::Continue;
});
}
void GraphicsAdapter::initialize()
{
Device::initialize();
VERIFY(!!m_scratch_space);
VirtIO::Device::initialize();
if (auto* config = get_config(VirtIO::ConfigurationType::Device)) {
m_device_configuration = config;
bool success = negotiate_features([&](u64 supported_features) {
@ -94,27 +79,23 @@ void GraphicsAdapter::initialize()
}
VERIFY(success);
finish_init();
initialize_3d_device();
MutexLocker locker(m_operation_lock);
// Get display information using VIRTIO_GPU_CMD_GET_DISPLAY_INFO
query_display_information();
query_display_edid({});
} else {
VERIFY_NOT_REACHED();
}
}
void GraphicsAdapter::create_framebuffer_devices()
Graphics::VirtIOGPU::ResourceID VirtIOGraphicsAdapter::allocate_resource_id(Badge<VirtIODisplayConnector>)
{
for (size_t i = 0; i < min(m_num_scanouts, VIRTIO_GPU_MAX_SCANOUTS); i++) {
auto& scanout = m_scanouts[i];
scanout.framebuffer = adopt_ref(*new VirtIOGPU::FramebufferDevice(*this, i));
scanout.framebuffer->after_inserting();
scanout.console = Kernel::Graphics::VirtIOGPU::Console::initialize(scanout.framebuffer);
}
return m_resource_id_counter++;
}
bool GraphicsAdapter::handle_device_config_change()
Graphics::VirtIOGPU::ContextID VirtIOGraphicsAdapter::allocate_context_id(Badge<VirtIODisplayConnector>)
{
// FIXME: This should really be tracked using a bitmap, instead of an atomic counter
return m_context_id_counter++;
}
bool VirtIOGraphicsAdapter::handle_device_config_change()
{
auto events = get_pending_events();
if (events & VIRTIO_GPU_EVENT_DISPLAY) {
@ -129,7 +110,7 @@ bool GraphicsAdapter::handle_device_config_change()
return true;
}
void GraphicsAdapter::handle_queue_update(u16 queue_index)
void VirtIOGraphicsAdapter::handle_queue_update(u16 queue_index)
{
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Handle queue update");
VERIFY(queue_index == CONTROLQ);
@ -140,145 +121,75 @@ void GraphicsAdapter::handle_queue_update(u16 queue_index)
m_outstanding_request.wake_all();
}
u32 GraphicsAdapter::get_pending_events()
u32 VirtIOGraphicsAdapter::get_pending_events()
{
return config_read32(*m_device_configuration, DEVICE_EVENTS_READ);
}
void GraphicsAdapter::clear_pending_events(u32 event_bitmask)
void VirtIOGraphicsAdapter::clear_pending_events(u32 event_bitmask)
{
config_write32(*m_device_configuration, DEVICE_EVENTS_CLEAR, event_bitmask);
}
void GraphicsAdapter::query_display_information()
ErrorOr<void> VirtIOGraphicsAdapter::query_and_set_edid(u32 scanout_id, VirtIODisplayConnector& display_connector)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request, Protocol::CommandType::VIRTIO_GPU_CMD_GET_DISPLAY_INFO, 0);
auto& response = writer.append_structure<Protocol::DisplayInfoResponse>();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
for (size_t i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; ++i) {
auto& scanout = m_scanouts[i].display_info;
scanout = response.scanout_modes[i];
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Scanout {}: enabled: {} x: {}, y: {}, width: {}, height: {}", i, !!scanout.enabled, scanout.rect.x, scanout.rect.y, scanout.rect.width, scanout.rect.height);
if (scanout.enabled && !m_default_scanout.has_value())
m_default_scanout = i;
m_scanouts[i].edid = {};
}
VERIFY(m_default_scanout.has_value());
}
void GraphicsAdapter::query_display_edid(Optional<ScanoutID> scanout_id)
{
VERIFY(m_operation_lock.is_locked());
SpinlockLocker locker(m_operation_lock);
if (!is_feature_accepted(VIRTIO_GPU_F_EDID))
return;
return Error::from_errno(ENOTSUP);
for (size_t i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; ++i) {
if (scanout_id.has_value() && scanout_id.value() != i)
continue;
// scanout.display_info.enabled doesn't seem to reflect the actual state,
// even if we were to call query_display_information prior to calling
// this function. So, just ignore, we seem to get EDID information regardless.
auto query_edid_result = query_edid(i);
if (query_edid_result.is_error()) {
dbgln("VirtIO::GraphicsAdapater: Scanout {}: Failed to parse EDID: {}", i, query_edid_result.error());
m_scanouts[i].edid = {};
} else {
m_scanouts[i].edid = query_edid_result.release_value();
if (m_scanouts[i].edid.has_value()) {
auto& parsed_edid = m_scanouts[i].edid.value();
dbgln("VirtIO::GraphicsAdapater: Scanout {}: EDID {}: Manufacturer: {} Product: {} Serial #{}", i,
parsed_edid.version(), parsed_edid.legacy_manufacturer_id(), parsed_edid.product_code(), parsed_edid.serial_number());
if (auto screen_size = parsed_edid.screen_size(); screen_size.has_value()) {
auto& size = screen_size.value();
dbgln("VirtIO::GraphicsAdapater: Scanout {}: Screen size: {}cm x {}cm", i,
size.horizontal_cm(), size.vertical_cm());
} else if (auto aspect_ratio = parsed_edid.aspect_ratio(); aspect_ratio.has_value()) {
auto& ratio = aspect_ratio.value();
dbgln("VirtIO::GraphicsAdapater: Scanout {}: Aspect ratio: {} : 1", i, ratio.ratio());
} else {
dbgln("VirtIO::GraphicsAdapater: Scanout {}: Unknown screen size or aspect ratio", i);
}
} else {
dbgln("VirtIO::GraphicsAdapater: Scanout {}: No EDID", i);
}
}
}
}
ErrorOr<ByteBuffer> GraphicsAdapter::get_edid(size_t output_port_index) const
{
if (output_port_index >= VIRTIO_GPU_MAX_SCANOUTS)
return Error::from_errno(ENODEV);
auto& edid = m_scanouts[output_port_index].edid;
if (edid.has_value())
return ByteBuffer::copy(edid.value().bytes());
return ByteBuffer {};
}
auto GraphicsAdapter::query_edid(u32 scanout_id) -> ErrorOr<Optional<EDID::Parser>>
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::GetEDID>();
auto& response = writer.append_structure<Protocol::GetEDIDResponse>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::GetEDID>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::GetEDIDResponse>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_GET_EDID, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_GET_EDID, 0);
request.scanout_id = scanout_id;
request.padding = 0;
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
if (response.header.type != to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_EDID))
if (response.header.type != to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_EDID))
return Error::from_string_literal("VirtIO::GraphicsAdapter: Failed to get EDID");
if (response.size == 0)
return Error::from_string_literal("VirtIO::GraphicsAdapter: Failed to get EDID, empty buffer");
auto edid_buffer = TRY(ByteBuffer::copy(response.edid, response.size));
auto edid = TRY(EDID::Parser::from_bytes(move(edid_buffer)));
return edid;
Array<u8, 128> raw_edid;
memcpy(raw_edid.data(), response.edid, min(sizeof(raw_edid), response.size));
display_connector.set_edid_bytes({}, raw_edid);
return {};
}
ResourceID GraphicsAdapter::create_2d_resource(Protocol::Rect rect)
Graphics::VirtIOGPU::ResourceID VirtIOGraphicsAdapter::create_2d_resource(Graphics::VirtIOGPU::Protocol::Rect rect)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceCreate2D>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceCreate2D>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, 0);
auto resource_id = allocate_resource_id();
request.resource_id = resource_id.value();
request.width = rect.width;
request.height = rect.height;
request.format = to_underlying(Protocol::TextureFormat::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
request.format = to_underlying(Graphics::VirtIOGPU::Protocol::TextureFormat::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated 2d resource with id {}", resource_id.value());
return resource_id;
}
ResourceID GraphicsAdapter::create_3d_resource(Protocol::Resource3DSpecification const& resource_3d_specification)
Graphics::VirtIOGPU::ResourceID VirtIOGraphicsAdapter::create_3d_resource(Graphics::VirtIOGPU::Protocol::Resource3DSpecification const& resource_3d_specification)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceCreate3D>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceCreate3D>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, 0);
auto resource_id = allocate_resource_id();
request.resource_id = resource_id.value();
@ -286,17 +197,17 @@ ResourceID GraphicsAdapter::create_3d_resource(Protocol::Resource3DSpecification
u32* start_of_copied_fields = &request.target;
// Validate that the sub copy from the resource_3d_specification to the offset of the request fits.
static_assert((sizeof(request) - offsetof(Protocol::ResourceCreate3D, target) == sizeof(resource_3d_specification)));
static_assert((sizeof(request) - offsetof(Graphics::VirtIOGPU::Protocol::ResourceCreate3D, target) == sizeof(resource_3d_specification)));
memcpy(start_of_copied_fields, &resource_3d_specification, sizeof(resource_3d_specification));
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == static_cast<u32>(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == static_cast<u32>(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated 3d resource with id {}", resource_id.value());
return resource_id;
}
void GraphicsAdapter::ensure_backing_storage(ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length)
void VirtIOGraphicsAdapter::ensure_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length)
{
VERIFY(m_operation_lock.is_locked());
@ -306,100 +217,97 @@ void GraphicsAdapter::ensure_backing_storage(ResourceID resource_id, Memory::Reg
size_t num_mem_regions = buffer_length / PAGE_SIZE;
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceAttachBacking>();
const size_t header_block_size = sizeof(request) + num_mem_regions * sizeof(Protocol::MemoryEntry);
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceAttachBacking>();
const size_t header_block_size = sizeof(request) + num_mem_regions * sizeof(Graphics::VirtIOGPU::Protocol::MemoryEntry);
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, 0);
request.resource_id = resource_id.value();
request.num_entries = num_mem_regions;
for (size_t i = 0; i < num_mem_regions; ++i) {
auto& memory_entry = writer.append_structure<Protocol::MemoryEntry>();
auto& memory_entry = writer.append_structure<Graphics::VirtIOGPU::Protocol::MemoryEntry>();
memory_entry.address = region.physical_page(first_page_index + i)->paddr().get();
memory_entry.length = PAGE_SIZE;
}
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
synchronous_virtio_gpu_command(start_of_scratch_space(), header_block_size, sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated backing storage");
}
void GraphicsAdapter::detach_backing_storage(ResourceID resource_id)
void VirtIOGraphicsAdapter::detach_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceDetachBacking>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceDetachBacking>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, 0);
request.resource_id = resource_id.value();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Detached backing storage");
}
void GraphicsAdapter::set_scanout_resource(ScanoutID scanout, ResourceID resource_id, Protocol::Rect rect)
void VirtIOGraphicsAdapter::set_scanout_resource(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect rect)
{
VERIFY(m_operation_lock.is_locked());
{
// We need to scope the request/response here so that we can query display information later on
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::SetScanOut>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::SetScanOut>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_SET_SCANOUT, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_SET_SCANOUT, 0);
request.resource_id = resource_id.value();
request.scanout_id = scanout.value();
request.rect = rect;
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Set backing scanout");
}
// Now that the Scanout should be enabled, update the EDID
query_display_edid(scanout);
}
void GraphicsAdapter::transfer_framebuffer_data_to_host(ScanoutID scanout, ResourceID resource_id, Protocol::Rect const& dirty_rect)
void VirtIOGraphicsAdapter::transfer_framebuffer_data_to_host(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::TransferToHost2D>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::TransferToHost2D>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, 0);
request.offset = (dirty_rect.x + (dirty_rect.y * m_scanouts[scanout.value()].display_info.rect.width)) * sizeof(u32);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, 0);
request.offset = (dirty_rect.x + (dirty_rect.y * m_scanouts[scanout.value()].display_connector->display_information({}).rect.width)) * sizeof(u32);
request.resource_id = resource_id.value();
request.rect = dirty_rect;
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
void GraphicsAdapter::flush_displayed_image(ResourceID resource_id, Protocol::Rect const& dirty_rect)
void VirtIOGraphicsAdapter::flush_displayed_image(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceFlush>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceFlush>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_FLUSH, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_FLUSH, 0);
request.resource_id = resource_id.value();
request.rect = dirty_rect;
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
void GraphicsAdapter::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t request_size, size_t response_size)
void VirtIOGraphicsAdapter::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t request_size, size_t response_size)
{
VERIFY(m_operation_lock.is_locked());
VERIFY(m_outstanding_request.is_empty());
@ -415,7 +323,7 @@ void GraphicsAdapter::synchronous_virtio_gpu_command(PhysicalAddress buffer_star
m_outstanding_request.wait_forever();
}
void GraphicsAdapter::populate_virtio_gpu_request_header(Protocol::ControlHeader& header, Protocol::CommandType ctrl_type, u32 flags)
void VirtIOGraphicsAdapter::populate_virtio_gpu_request_header(Graphics::VirtIOGPU::Protocol::ControlHeader& header, Graphics::VirtIOGPU::Protocol::CommandType ctrl_type, u32 flags)
{
header.type = to_underlying(ctrl_type);
header.flags = flags;
@ -424,61 +332,57 @@ void GraphicsAdapter::populate_virtio_gpu_request_header(Protocol::ControlHeader
header.padding = 0;
}
void GraphicsAdapter::flush_dirty_rectangle(ScanoutID scanout_id, ResourceID resource_id, Protocol::Rect const& dirty_rect)
void VirtIOGraphicsAdapter::flush_dirty_rectangle(Graphics::VirtIOGPU::ScanoutID scanout_id, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
MutexLocker locker(m_operation_lock);
VERIFY(m_operation_lock.is_locked());
transfer_framebuffer_data_to_host(scanout_id, resource_id, dirty_rect);
flush_displayed_image(resource_id, dirty_rect);
}
ResourceID GraphicsAdapter::allocate_resource_id()
Graphics::VirtIOGPU::ResourceID VirtIOGraphicsAdapter::allocate_resource_id()
{
VERIFY(m_operation_lock.is_locked());
m_resource_id_counter = m_resource_id_counter.value() + 1;
return m_resource_id_counter;
return m_resource_id_counter++;
}
ContextID GraphicsAdapter::allocate_context_id()
Graphics::VirtIOGPU::ContextID VirtIOGraphicsAdapter::allocate_context_id()
{
// FIXME: This should really be tracked using a bitmap, instead of an atomic counter
VERIFY(m_operation_lock.is_locked());
m_context_id_counter = m_context_id_counter.value() + 1;
return m_context_id_counter;
return m_context_id_counter++;
}
void GraphicsAdapter::delete_resource(ResourceID resource_id)
void VirtIOGraphicsAdapter::delete_resource(Graphics::VirtIOGPU::ResourceID resource_id)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceUnref>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceUnref>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_UNREF, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_UNREF, 0);
request.resource_id = resource_id.value();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
void GraphicsAdapter::initialize_3d_device()
void VirtIOGraphicsAdapter::initialize_3d_device()
{
if (m_has_virgl_support) {
MutexLocker locker(m_operation_lock);
m_3d_device = VirtIOGPU::GPU3DDevice::must_create(*this);
SpinlockLocker locker(m_operation_lock);
m_3d_device = VirtIOGPU3DDevice::must_create(*this);
}
}
ContextID GraphicsAdapter::create_context()
Graphics::VirtIOGPU::ContextID VirtIOGraphicsAdapter::create_context()
{
VERIFY(m_operation_lock.is_locked());
auto ctx_id = allocate_context_id();
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ContextCreate>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ContextCreate>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
constexpr char const* region_name = "Serenity VirGL3D Context";
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_CTX_CREATE, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_CTX_CREATE, 0);
request.header.context_id = ctx_id.value();
request.name_length = strlen(region_name);
memset(request.debug_name.data(), 0, 64);
@ -487,53 +391,53 @@ ContextID GraphicsAdapter::create_context()
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
return ctx_id;
}
void GraphicsAdapter::submit_command_buffer(ContextID context_id, Function<size_t(Bytes)> buffer_writer)
void VirtIOGraphicsAdapter::submit_command_buffer(Graphics::VirtIOGPU::ContextID context_id, Function<size_t(Bytes)> buffer_writer)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::CommandSubmit>();
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::CommandSubmit>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_SUBMIT_3D, 0);
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_SUBMIT_3D, 0);
request.header.context_id = context_id.value();
auto max_command_buffer_length = m_scratch_space->size() - sizeof(request) - sizeof(Protocol::ControlHeader);
auto max_command_buffer_length = m_scratch_space->size() - sizeof(request) - sizeof(Graphics::VirtIOGPU::Protocol::ControlHeader);
// Truncate to nearest multiple of alignment, to ensure padding loop doesn't exhaust allocated space
max_command_buffer_length -= max_command_buffer_length % alignof(Protocol::ControlHeader);
max_command_buffer_length -= max_command_buffer_length % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader);
Bytes command_buffer_buffer(m_scratch_space->vaddr().offset(sizeof(request)).as_ptr(), max_command_buffer_length);
request.size = buffer_writer(command_buffer_buffer);
writer.skip_bytes(request.size);
// The alignment of a ControlHeader may be a few words larger than the length of a command buffer, so
// we pad with no-ops until we reach the correct alignment
while (writer.current_offset() % alignof(Protocol::ControlHeader) != 0) {
VERIFY((writer.current_offset() % alignof(Protocol::ControlHeader)) % sizeof(u32) == 0);
writer.append_structure<u32>() = to_underlying(VirGLCommand::NOP);
while (writer.current_offset() % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader) != 0) {
VERIFY((writer.current_offset() % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader)) % sizeof(u32) == 0);
writer.append_structure<u32>() = to_underlying(Graphics::VirtIOGPU::VirGLCommand::NOP);
request.size += 4;
}
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Sending command buffer of length {}", request.size);
auto& response = writer.append_structure<Protocol::ControlHeader>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request) + request.size, sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
void GraphicsAdapter::attach_resource_to_context(ResourceID resource_id, ContextID context_id)
void VirtIOGraphicsAdapter::attach_resource_to_context(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::ContextID context_id)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ContextAttachResource>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, 0);
auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ContextAttachResource>();
auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, 0);
request.header.context_id = context_id.value();
request.resource_id = resource_id.value();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
VERIFY(response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
}

View file

@ -10,16 +10,10 @@
#include <AK/DistinctNumeric.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/Queue.h>
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Graphics/GenericGraphicsAdapter.h>
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/Protocol.h>
#include <LibEDID/EDID.h>
namespace Kernel::Graphics::VirtIOGPU {
class GPU3DDevice;
namespace Kernel {
#define VIRTIO_GPU_F_VIRGL (1 << 0)
#define VIRTIO_GPU_F_EDID (1 << 1)
@ -34,16 +28,16 @@ class GPU3DDevice;
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
class FramebufferDevice;
class GraphicsAdapter final
class VirtIODisplayConnector;
class VirtIOGPU3DDevice;
class VirtIOGraphicsAdapter final
: public GenericGraphicsAdapter
, public VirtIO::Device {
friend class FramebufferDevice;
friend class VirtIODisplayConnector;
friend class VirtIOGPU3DDevice;
public:
static NonnullRefPtr<GraphicsAdapter> initialize(PCI::DeviceIdentifier const&);
virtual bool framebuffer_devices_initialized() const override { return m_created_framebuffer_devices; }
static NonnullRefPtr<VirtIOGraphicsAdapter> initialize(PCI::DeviceIdentifier const&);
// FIXME: There's a VirtIO VGA GPU variant, so we should consider that
virtual bool vga_compatible() const override { return false; }
@ -51,76 +45,46 @@ public:
virtual void initialize() override;
void initialize_3d_device();
ErrorOr<ByteBuffer> get_edid(size_t output_port_index) const override;
bool edid_feature_accepted() const;
Graphics::VirtIOGPU::ResourceID allocate_resource_id(Badge<VirtIODisplayConnector>);
Graphics::VirtIOGPU::ContextID allocate_context_id(Badge<VirtIODisplayConnector>);
private:
void flush_dirty_rectangle(ScanoutID, ResourceID, Protocol::Rect const& dirty_rect);
template<typename F>
IterationDecision for_each_framebuffer(F f)
{
for (auto& scanout : m_scanouts) {
if (!scanout.framebuffer)
continue;
IterationDecision decision = f(*scanout.framebuffer, *scanout.console);
if (decision != IterationDecision::Continue)
return decision;
}
return IterationDecision::Continue;
}
RefPtr<Console> default_console()
{
if (m_default_scanout.has_value())
return m_scanouts[m_default_scanout.value().value()].console;
return {};
}
auto& display_info(ScanoutID scanout) const
{
VERIFY(scanout.value() < VIRTIO_GPU_MAX_SCANOUTS);
return m_scanouts[scanout.value()].display_info;
}
auto& display_info(ScanoutID scanout)
{
VERIFY(scanout.value() < VIRTIO_GPU_MAX_SCANOUTS);
return m_scanouts[scanout.value()].display_info;
}
explicit GraphicsAdapter(PCI::DeviceIdentifier const&);
void create_framebuffer_devices();
virtual void initialize_framebuffer_devices() override;
virtual void enable_consoles() override;
virtual void disable_consoles() override;
virtual bool modesetting_capable() const override { return false; }
virtual bool double_framebuffering_capable() const override { return false; }
virtual bool try_to_set_resolution(size_t, size_t, size_t) override { return false; }
virtual bool set_y_offset(size_t, size_t) override { return false; }
void flush_dirty_rectangle(Graphics::VirtIOGPU::ScanoutID, Graphics::VirtIOGPU::ResourceID, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect);
struct Scanout {
RefPtr<Graphics::VirtIOGPU::FramebufferDevice> framebuffer;
RefPtr<Console> console;
Protocol::DisplayInfoResponse::Display display_info {};
Optional<EDID::Parser> edid;
RefPtr<VirtIODisplayConnector> display_connector;
};
VirtIOGraphicsAdapter(PCI::DeviceIdentifier const&, NonnullOwnPtr<Memory::Region> scratch_space_region);
ErrorOr<void> initialize_adapter();
// ^GenericGraphicsAdapter
// FIXME: Remove all of these methods when we get rid of the FramebufferDevice class.
virtual ErrorOr<ByteBuffer> get_edid(size_t) const override { VERIFY_NOT_REACHED(); }
virtual bool try_to_set_resolution(size_t, size_t, size_t) override { VERIFY_NOT_REACHED(); }
virtual bool set_y_offset(size_t, size_t) override { VERIFY_NOT_REACHED(); }
virtual void initialize_framebuffer_devices() override { }
virtual void enable_consoles() override { }
virtual void disable_consoles() override { }
virtual bool framebuffer_devices_initialized() const override { return false; }
virtual bool modesetting_capable() const override { return true; }
virtual bool double_framebuffering_capable() const override { return true; }
virtual bool handle_device_config_change() override;
virtual void handle_queue_update(u16 queue_index) override;
u32 get_pending_events();
void clear_pending_events(u32 event_bitmask);
// 3D Command stuff
ContextID create_context();
void attach_resource_to_context(ResourceID resource_id, ContextID context_id);
void submit_command_buffer(ContextID, Function<size_t(Bytes)> buffer_writer);
Protocol::TextureFormat get_framebuffer_format() const { return Protocol::TextureFormat::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; }
Graphics::VirtIOGPU::ContextID create_context();
void attach_resource_to_context(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::ContextID context_id);
void submit_command_buffer(Graphics::VirtIOGPU::ContextID, Function<size_t(Bytes)> buffer_writer);
Graphics::VirtIOGPU::Protocol::TextureFormat get_framebuffer_format() const { return Graphics::VirtIOGPU::Protocol::TextureFormat::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; }
auto& operation_lock() { return m_operation_lock; }
ResourceID allocate_resource_id();
ContextID allocate_context_id();
Graphics::VirtIOGPU::ResourceID allocate_resource_id();
Graphics::VirtIOGPU::ContextID allocate_context_id();
PhysicalAddress start_of_scratch_space() const { return m_scratch_space->physical_page(0)->paddr(); }
AK::BinaryBufferWriter create_scratchspace_writer()
@ -128,36 +92,31 @@ private:
return { Bytes(m_scratch_space->vaddr().as_ptr(), m_scratch_space->size()) };
}
void synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t request_size, size_t response_size);
void populate_virtio_gpu_request_header(Protocol::ControlHeader& header, Protocol::CommandType ctrl_type, u32 flags = 0);
void populate_virtio_gpu_request_header(Graphics::VirtIOGPU::Protocol::ControlHeader& header, Graphics::VirtIOGPU::Protocol::CommandType ctrl_type, u32 flags = 0);
void query_display_information();
void query_display_edid(Optional<ScanoutID>);
ResourceID create_2d_resource(Protocol::Rect rect);
ResourceID create_3d_resource(Protocol::Resource3DSpecification const& resource_3d_specification);
void delete_resource(ResourceID resource_id);
void ensure_backing_storage(ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length);
void detach_backing_storage(ResourceID resource_id);
void set_scanout_resource(ScanoutID scanout, ResourceID resource_id, Protocol::Rect rect);
void transfer_framebuffer_data_to_host(ScanoutID scanout, ResourceID resource_id, Protocol::Rect const& rect);
void flush_displayed_image(ResourceID resource_id, Protocol::Rect const& dirty_rect);
ErrorOr<Optional<EDID::Parser>> query_edid(u32 scanout_id);
Graphics::VirtIOGPU::ResourceID create_2d_resource(Graphics::VirtIOGPU::Protocol::Rect rect);
Graphics::VirtIOGPU::ResourceID create_3d_resource(Graphics::VirtIOGPU::Protocol::Resource3DSpecification const& resource_3d_specification);
void delete_resource(Graphics::VirtIOGPU::ResourceID resource_id);
void ensure_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length);
void detach_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id);
void set_scanout_resource(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect rect);
void transfer_framebuffer_data_to_host(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& rect);
void flush_displayed_image(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect);
ErrorOr<void> query_and_set_edid(u32 scanout_id, VirtIODisplayConnector& display_connector);
bool m_created_framebuffer_devices { false };
Optional<ScanoutID> m_default_scanout;
size_t m_num_scanouts { 0 };
Scanout m_scanouts[VIRTIO_GPU_MAX_SCANOUTS];
VirtIO::Configuration const* m_device_configuration { nullptr };
ResourceID m_resource_id_counter { 0 };
ContextID m_context_id_counter { 0 };
RefPtr<GPU3DDevice> m_3d_device;
// Note: Resource ID 0 is invalid, and we must not allocate 0 as the first resource ID.
Atomic<u32> m_resource_id_counter { 1 };
Atomic<u32> m_context_id_counter { 1 };
RefPtr<VirtIOGPU3DDevice> m_3d_device;
bool m_has_virgl_support { false };
// Synchronous commands
WaitQueue m_outstanding_request;
Mutex m_operation_lock;
OwnPtr<Memory::Region> m_scratch_space;
friend class Kernel::Graphics::VirtIOGPU::GPU3DDevice;
Spinlock m_operation_lock;
NonnullOwnPtr<Memory::Region> m_scratch_space;
};
}