
LibVideo: Allow the VP9 decoder to queue multiple frames

Frames will now be queued for retrieval by the user of the decoder.
When the end of the current queue is reached, a DecoderError of
category NeedsMoreInput will be emitted, allowing the caller to react
by displaying what was previously retrieved or by sending more samples.
Zaggy1024 2022-11-03 19:18:38 -05:00, committed by Andrew Kaster
parent 993385f18d
commit 72ed286e16
5 changed files with 76 additions and 39 deletions
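
For context, the consume loop this enables looks roughly like the following. This is a minimal sketch based on the PlaybackManager changes below; the surrounding loop and the names demuxer, track, and display_frame() are hypothetical stand-ins, not part of this commit:

    // Hypothetical caller: feed samples, then drain every queued frame.
    for (;;) {
        auto sample = TRY(demuxer.get_next_video_sample_for_track(track));
        TRY(decoder.receive_sample(sample->data()));

        // Retrieve all frames the decoder has queued (e.g. the shown frames
        // of a VP9 superframe) before sending the next sample.
        while (true) {
            auto frame_result = decoder.get_decoded_frame();
            if (frame_result.is_error()) {
                if (frame_result.error().category() == DecoderErrorCategory::NeedsMoreInput)
                    break; // Queue exhausted; react by sending more samples.
                return frame_result.release_error();
            }
            display_frame(frame_result.release_value()); // Display what was retrieved.
        }
    }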


@@ -22,6 +22,7 @@ using DecoderErrorOr = ErrorOr<T, DecoderError>;
 enum class DecoderErrorCategory : u32 {
     Unknown,
     IO,
+    NeedsMoreInput,
     EndOfStream,
     Memory,
     // The input is corrupted.


@@ -195,6 +195,11 @@ void PlaybackManager::restart_playback()
         on_decoder_error(result.release_error());
 }
 
+void PlaybackManager::post_decoder_error(DecoderError error)
+{
+    m_main_loop.post_event(*this, make<DecoderErrorEvent>(error));
+}
+
 bool PlaybackManager::decode_and_queue_one_sample()
 {
     if (m_frame_queue->size() >= FRAME_BUFFER_COUNT)
@@ -215,9 +220,25 @@ bool PlaybackManager::decode_and_queue_one_sample()
     })
 
     auto frame_sample = TRY_OR_ENQUEUE_ERROR(m_demuxer->get_next_video_sample_for_track(m_selected_video_track));
-    TRY_OR_ENQUEUE_ERROR(m_decoder->receive_sample(frame_sample->data()));
-    auto decoded_frame = TRY_OR_ENQUEUE_ERROR(m_decoder->get_decoded_frame());
+    OwnPtr<VideoFrame> decoded_frame = nullptr;
+    while (!decoded_frame) {
+        TRY_OR_ENQUEUE_ERROR(m_decoder->receive_sample(frame_sample->data()));
+
+        while (true) {
+            auto frame_result = m_decoder->get_decoded_frame();
+            if (frame_result.is_error()) {
+                if (frame_result.error().category() == DecoderErrorCategory::NeedsMoreInput)
+                    break;
+
+                post_decoder_error(frame_result.release_error());
+                return false;
+            }
+
+            decoded_frame = frame_result.release_value();
+            VERIFY(decoded_frame);
+        }
+    }
 
     auto& cicp = decoded_frame->cicp();
     cicp.adopt_specified_values(frame_sample->container_cicp());


@@ -124,7 +124,8 @@ private:
     bool prepare_next_frame();
     void update_presented_frame();
 
-    // Runs off the main thread
+    // May run off the main thread
+    void post_decoder_error(DecoderError error);
     bool decode_and_queue_one_sample();
     void on_decode_timer();


@@ -77,15 +77,55 @@ DecoderErrorOr<void> Decoder::decode_frame(Span<u8 const> frame_data)
     }
 
     // 4. The output process as specified in section 8.9 is invoked.
+    // FIXME: Create a struct to store an output frame along with all information needed to display
+    //        it. This function will need to append the images to a vector to ensure that if a superframe
+    //        with multiple output frames is encountered, all of them can be displayed.
+    if (m_parser->m_show_frame)
+        TRY(create_video_frame());
 
     // 5. The reference frame update process as specified in section 8.10 is invoked.
     TRY(update_reference_frames());
 
     return {};
 }
 
+DecoderErrorOr<void> Decoder::create_video_frame()
+{
+    size_t decoded_y_width = m_parser->m_mi_cols * 8;
+    Gfx::Size<size_t> output_y_size = {
+        m_parser->m_frame_width,
+        m_parser->m_frame_height,
+    };
+    auto decoded_uv_width = decoded_y_width >> m_parser->m_subsampling_x;
+    Gfx::Size<size_t> output_uv_size = {
+        output_y_size.width() >> m_parser->m_subsampling_x,
+        output_y_size.height() >> m_parser->m_subsampling_y,
+    };
+
+    Array<FixedArray<u16>, 3> output_buffers = {
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_y_size.width() * output_y_size.height())),
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
+    };
+
+    for (u8 plane = 0; plane < 3; plane++) {
+        auto& buffer = output_buffers[plane];
+        auto decoded_width = plane == 0 ? decoded_y_width : decoded_uv_width;
+        auto output_size = plane == 0 ? output_y_size : output_uv_size;
+        auto const& decoded_buffer = get_output_buffer(plane);
+
+        for (u32 row = 0; row < output_size.height(); row++) {
+            memcpy(
+                buffer.data() + row * output_size.width(),
+                decoded_buffer.data() + row * decoded_width,
+                output_size.width() * sizeof(*buffer.data()));
+        }
+    }
+
+    auto frame = DECODER_TRY_ALLOC(adopt_nonnull_own_or_enomem(new (nothrow) SubsampledYUVFrame(
+        { output_y_size.width(), output_y_size.height() },
+        m_parser->m_bit_depth, get_cicp_color_space(),
+        m_parser->m_subsampling_x, m_parser->m_subsampling_y,
+        output_buffers[0], output_buffers[1], output_buffers[2])));
+    m_video_frame_queue.enqueue(move(frame));
+
+    return {};
+}
+
 inline size_t buffer_size(size_t width, size_t height)
 {
     return width * height;
@@ -186,40 +226,10 @@ inline CodingIndependentCodePoints Decoder::get_cicp_color_space()
 
 DecoderErrorOr<NonnullOwnPtr<VideoFrame>> Decoder::get_decoded_frame()
 {
-    size_t decoded_y_width = m_parser->m_mi_cols * 8;
-    Gfx::Size<size_t> output_y_size = {
-        m_parser->m_frame_width,
-        m_parser->m_frame_height,
-    };
-    auto decoded_uv_width = decoded_y_width >> m_parser->m_subsampling_x;
-    Gfx::Size<size_t> output_uv_size = {
-        output_y_size.width() >> m_parser->m_subsampling_x,
-        output_y_size.height() >> m_parser->m_subsampling_y,
-    };
-
-    Array<FixedArray<u16>, 3> output_buffers = {
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_y_size.width() * output_y_size.height())),
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
-    };
-
-    for (u8 plane = 0; plane < 3; plane++) {
-        auto& buffer = output_buffers[plane];
-        auto decoded_width = plane == 0 ? decoded_y_width : decoded_uv_width;
-        auto output_size = plane == 0 ? output_y_size : output_uv_size;
-        auto const& decoded_buffer = get_output_buffer(plane);
-
-        for (u32 row = 0; row < output_size.height(); row++) {
-            memcpy(
-                buffer.data() + row * output_size.width(),
-                decoded_buffer.data() + row * decoded_width,
-                output_size.width() * sizeof(*buffer.data()));
-        }
-    }
-
-    return DECODER_TRY_ALLOC(adopt_nonnull_own_or_enomem(new (nothrow) SubsampledYUVFrame(
-        { output_y_size.width(), output_y_size.height() },
-        m_parser->m_bit_depth, get_cicp_color_space(),
-        m_parser->m_subsampling_x, m_parser->m_subsampling_y,
-        output_buffers[0], output_buffers[1], output_buffers[2])));
+    if (m_video_frame_queue.is_empty())
+        return DecoderError::format(DecoderErrorCategory::NeedsMoreInput, "No video frame in queue.");
+
+    return m_video_frame_queue.dequeue();
 }
 
 u8 Decoder::merge_prob(u8 pre_prob, u8 count_0, u8 count_1, u8 count_sat, u8 max_update_factor)
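
Taken together, these two hunks move frame output out of get_decoded_frame(): create_video_frame() now enqueues each shown frame as it is decoded, and get_decoded_frame() only drains the queue. A minimal sketch of the resulting contract (sample_data is a hypothetical input that decodes to a single shown frame):

    TRY(decoder.receive_sample(sample_data));      // hypothetical sample data
    auto frame = TRY(decoder.get_decoded_frame()); // dequeues the shown frame

    // The queue is now empty, so the next call fails recoverably with
    // NeedsMoreInput instead of a fatal error:
    auto next = decoder.get_decoded_frame();
    VERIFY(next.is_error());
    VERIFY(next.error().category() == DecoderErrorCategory::NeedsMoreInput);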


@@ -10,6 +10,7 @@
 #include <AK/ByteBuffer.h>
 #include <AK/Error.h>
 #include <AK/NonnullOwnPtr.h>
+#include <AK/Queue.h>
 #include <AK/Span.h>
 #include <LibVideo/Color/CodingIndependentCodePoints.h>
 #include <LibVideo/DecoderError.h>
@@ -36,6 +37,7 @@ private:
     typedef i32 Intermediate;
 
     DecoderErrorOr<void> decode_frame(Span<u8 const>);
+    DecoderErrorOr<void> create_video_frame();
 
     DecoderErrorOr<void> allocate_buffers();
     Vector<Intermediate>& get_temp_buffer(u8 plane);
@@ -167,6 +169,8 @@ private:
         Vector<Intermediate> intermediate[3];
         Vector<u16> output[3];
     } m_buffers;
+
+    Queue<NonnullOwnPtr<VideoFrame>, 1> m_video_frame_queue;
 };
 
 }