1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 19:17:44 +00:00

Kernel: Implement basic VirGL device

This commit flips VirtIOGPU back to using a Mutex for its operation
lock (instead of a spinlock). This is necessary for avoiding a few
system hangs when queuing actions on the driver from multiple
processes, which becomes much more of an issue when using VirGL from
multiple userspace processes.

This does result in a few code paths where we inevitably have to grab
a mutex from inside a spinlock; the only way to fix both issues is to
move to issuing asynchronous virtio gpu commands.
This commit is contained in:
Sahan Fernando 2022-02-13 16:45:30 +11:00 committed by Ali Mohammad Pur
parent 966989afe8
commit fd6a536c60
7 changed files with 503 additions and 8 deletions

View file

@ -7,10 +7,12 @@
#include <AK/BinaryBufferWriter.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/FramebufferDevice.h>
#include <Kernel/Graphics/VirtIOGPU/GPU3DDevice.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>
namespace Kernel::Graphics::VirtIOGPU {
@ -74,8 +76,11 @@ void GraphicsAdapter::initialize()
m_device_configuration = config;
bool success = negotiate_features([&](u64 supported_features) {
u64 negotiated = 0;
if (is_feature_set(supported_features, VIRTIO_GPU_F_VIRGL))
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: VIRGL is not yet supported!");
if (is_feature_set(supported_features, VIRTIO_GPU_F_VIRGL)) {
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: VirGL is available, enabling");
negotiated |= VIRTIO_GPU_F_VIRGL;
m_has_virgl_support = true;
}
if (is_feature_set(supported_features, VIRTIO_GPU_F_EDID))
negotiated |= VIRTIO_GPU_F_EDID;
return negotiated;
@ -89,7 +94,8 @@ void GraphicsAdapter::initialize()
}
VERIFY(success);
finish_init();
SpinlockLocker locker(m_operation_lock);
initialize_3d_device();
MutexLocker locker(m_operation_lock);
// Get display information using VIRTIO_GPU_CMD_GET_DISPLAY_INFO
query_display_information();
query_display_edid({});
@ -265,6 +271,28 @@ ResourceID GraphicsAdapter::create_2d_resource(Protocol::Rect rect)
return resource_id;
}
// Allocates a host-side 3D (VirGL) resource described by `resource_3d_specification`
// by issuing VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, and returns the newly allocated
// ResourceID. The command is fenced and executed synchronously; the response header
// is verified to be VIRTIO_GPU_RESP_OK_NODATA.
// Caller must already hold m_operation_lock.
ResourceID GraphicsAdapter::create_3d_resource(Protocol::Resource3DSpecification const& resource_3d_specification)
{
VERIFY(m_operation_lock.is_locked());
// Build request + response back-to-back in the shared scratch space.
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ResourceCreate3D>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, VIRTIO_GPU_FLAG_FENCE);
auto resource_id = allocate_resource_id();
request.resource_id = resource_id.value();
// TODO: Abstract this out a bit more
// Bulk-copies the specification into the request starting at the `target` field;
// assumes Resource3DSpecification's field layout exactly matches the request struct
// from `target` onwards — TODO confirm the two structs stay in sync.
u32* start_of_copied_fields = &request.target;
memcpy(start_of_copied_fields, &resource_3d_specification, sizeof(Protocol::Resource3DSpecification));
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == static_cast<u32>(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated 3d resource with id {}", resource_id.value());
return resource_id;
}
void GraphicsAdapter::ensure_backing_storage(ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length)
{
VERIFY(m_operation_lock.is_locked());
@ -395,7 +423,7 @@ void GraphicsAdapter::populate_virtio_gpu_request_header(Protocol::ControlHeader
void GraphicsAdapter::flush_dirty_rectangle(ScanoutID scanout_id, ResourceID resource_id, Protocol::Rect const& dirty_rect)
{
SpinlockLocker locker(m_operation_lock);
MutexLocker locker(m_operation_lock);
transfer_framebuffer_data_to_host(scanout_id, resource_id, dirty_rect);
flush_displayed_image(resource_id, dirty_rect);
}
@ -407,6 +435,14 @@ ResourceID GraphicsAdapter::allocate_resource_id()
return m_resource_id_counter;
}
// Hands out the next rendering-context ID by bumping a monotonically
// increasing counter. Caller must already hold m_operation_lock.
// FIXME: Track context IDs with a bitmap so they can be recycled,
// instead of using an ever-growing atomic counter.
ContextID GraphicsAdapter::allocate_context_id()
{
    VERIFY(m_operation_lock.is_locked());
    auto next_context_id = m_context_id_counter.value() + 1;
    m_context_id_counter = next_context_id;
    return m_context_id_counter;
}
void GraphicsAdapter::delete_resource(ResourceID resource_id)
{
VERIFY(m_operation_lock.is_locked());
@ -422,4 +458,79 @@ void GraphicsAdapter::delete_resource(ResourceID resource_id)
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
// Creates and registers the GPU3DDevice, but only when VirGL support was
// negotiated with the host during feature negotiation.
void GraphicsAdapter::initialize_3d_device()
{
    if (!m_has_virgl_support)
        return;

    MutexLocker locker(m_operation_lock);
    m_3d_device = MUST(DeviceManagement::try_create_device<VirtIOGPU::GPU3DDevice>(*this));
}
// Creates a new host-side rendering context via VIRTIO_GPU_CMD_CTX_CREATE and
// returns its ContextID. The context is given a fixed debug name so it can be
// identified on the host side. Caller must already hold m_operation_lock.
ContextID GraphicsAdapter::create_context()
{
VERIFY(m_operation_lock.is_locked());
auto ctx_id = allocate_context_id();
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::ContextCreate>();
auto& response = writer.append_structure<Protocol::ControlHeader>();
constexpr char const* region_name = "Serenity VirGL3D Context";
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_CTX_CREATE, VIRTIO_GPU_FLAG_FENCE);
request.header.context_id = ctx_id.value();
request.name_length = strlen(region_name);
// Zero the whole 64-byte name buffer first so the unused tail is deterministic,
// then copy in exactly name_length bytes (bounded by the VERIFY below).
memset(request.debug_name.data(), 0, 64);
VERIFY(request.name_length <= 64);
memcpy(request.debug_name.data(), region_name, request.name_length);
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
return ctx_id;
}
// Submits a VirGL command buffer to the host (VIRTIO_GPU_CMD_SUBMIT_3D) for the
// given context. `buffer_writer` is a callback that fills the provided scratch
// Bytes span with raw command-stream data and returns the number of bytes written.
// The buffer is padded with NOP commands up to ControlHeader alignment so the
// response header that follows it in scratch space is correctly aligned.
// Caller must already hold m_operation_lock.
void GraphicsAdapter::submit_command_buffer(ContextID context_id, Function<size_t(Bytes)> buffer_writer)
{
VERIFY(m_operation_lock.is_locked());
auto writer = create_scratchspace_writer();
auto& request = writer.append_structure<Protocol::CommandSubmit>();
populate_virtio_gpu_request_header(request.header, Protocol::CommandType::VIRTIO_GPU_CMD_SUBMIT_3D, VIRTIO_GPU_FLAG_FENCE);
request.header.context_id = context_id.value();
// Budget: everything in scratch space after the request, minus room for the
// trailing response header appended below.
auto max_command_buffer_length = m_scratch_space->size() - sizeof(request) - sizeof(Protocol::ControlHeader);
// Truncate to nearest multiple of alignment, to ensure padding loop doesn't exhaust allocated space
max_command_buffer_length -= max_command_buffer_length % alignof(Protocol::ControlHeader);
Bytes command_buffer_buffer(m_scratch_space->vaddr().offset(sizeof(request)).as_ptr(), max_command_buffer_length);
request.size = buffer_writer(command_buffer_buffer);
// Advance the writer past the caller-written bytes so appends land after them.
writer.skip_bytes(request.size);
// The alignment of a ControlHeader may be a few words larger than the length of a command buffer, so
// we pad with no-ops until we reach the correct alignment
while (writer.current_offset() % alignof(Protocol::ControlHeader) != 0) {
// Padding must proceed in whole u32 words; verify the offset is word-aligned.
VERIFY((writer.current_offset() % alignof(Protocol::ControlHeader)) % sizeof(u32) == 0);
writer.append_structure<u32>() = to_underlying(VirGLCommand::NOP);
request.size += 4;
}
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Sending command buffer of length {}", request.size);
auto& response = writer.append_structure<Protocol::ControlHeader>();
synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request) + request.size, sizeof(response));
VERIFY(response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
// Attaches an existing resource to a rendering context on the host
// (VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE). Fenced, synchronous; the response
// is verified to be VIRTIO_GPU_RESP_OK_NODATA.
// Caller must already hold m_operation_lock.
void GraphicsAdapter::attach_resource_to_context(ResourceID resource_id, ContextID context_id)
{
    VERIFY(m_operation_lock.is_locked());
    // Lay out the request followed by the response header in scratch space.
    auto scratch_writer = create_scratchspace_writer();
    auto& attach_request = scratch_writer.append_structure<Protocol::ContextAttachResource>();
    auto& attach_response = scratch_writer.append_structure<Protocol::ControlHeader>();
    populate_virtio_gpu_request_header(attach_request.header, Protocol::CommandType::VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_FLAG_FENCE);
    attach_request.resource_id = resource_id.value();
    attach_request.header.context_id = context_id.value();
    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(attach_request), sizeof(attach_response));
    VERIFY(attach_response.type == to_underlying(Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA));
}
}