diff --git a/Userland/Libraries/LibVideo/DecoderError.h b/Userland/Libraries/LibVideo/DecoderError.h
index 0f24e2ba20..022de9de15 100644
--- a/Userland/Libraries/LibVideo/DecoderError.h
+++ b/Userland/Libraries/LibVideo/DecoderError.h
@@ -22,6 +22,7 @@ using DecoderErrorOr = ErrorOr<T, DecoderError>;
 enum class DecoderErrorCategory : u32 {
     Unknown,
     IO,
+    NeedsMoreInput,
     EndOfStream,
     Memory,
     // The input is corrupted.
diff --git a/Userland/Libraries/LibVideo/PlaybackManager.cpp b/Userland/Libraries/LibVideo/PlaybackManager.cpp
index 7d6ca6adc0..c6579a8aa0 100644
--- a/Userland/Libraries/LibVideo/PlaybackManager.cpp
+++ b/Userland/Libraries/LibVideo/PlaybackManager.cpp
@@ -195,6 +195,11 @@ void PlaybackManager::restart_playback()
         on_decoder_error(result.release_error());
 }
 
+void PlaybackManager::post_decoder_error(DecoderError error)
+{
+    m_main_loop.post_event(*this, make<DecoderErrorEvent>(error));
+}
+
 bool PlaybackManager::decode_and_queue_one_sample()
 {
     if (m_frame_queue->size() >= FRAME_BUFFER_COUNT)
@@ -215,9 +220,25 @@ bool PlaybackManager::decode_and_queue_one_sample()
     })
 
     auto frame_sample = TRY_OR_ENQUEUE_ERROR(m_demuxer->get_next_video_sample_for_track(m_selected_video_track));
+    OwnPtr<VideoFrame> decoded_frame = nullptr;
+    while (!decoded_frame) {
+        TRY_OR_ENQUEUE_ERROR(m_decoder->receive_sample(frame_sample->data()));
 
-    TRY_OR_ENQUEUE_ERROR(m_decoder->receive_sample(frame_sample->data()));
-    auto decoded_frame = TRY_OR_ENQUEUE_ERROR(m_decoder->get_decoded_frame());
+        while (true) {
+            auto frame_result = m_decoder->get_decoded_frame();
+
+            if (frame_result.is_error()) {
+                if (frame_result.error().category() == DecoderErrorCategory::NeedsMoreInput)
+                    break;
+
+                post_decoder_error(frame_result.release_error());
+                return false;
+            }
+
+            decoded_frame = frame_result.release_value();
+            VERIFY(decoded_frame);
+        }
+    }
 
     auto& cicp = decoded_frame->cicp();
     cicp.adopt_specified_values(frame_sample->container_cicp());
diff --git a/Userland/Libraries/LibVideo/PlaybackManager.h b/Userland/Libraries/LibVideo/PlaybackManager.h
index 1ac3256336..320d409268 100644
--- a/Userland/Libraries/LibVideo/PlaybackManager.h
+++ b/Userland/Libraries/LibVideo/PlaybackManager.h
@@ -124,7 +124,8 @@ private:
     bool prepare_next_frame();
     void update_presented_frame();
 
-    // Runs off the main thread
+    // May run off the main thread
+    void post_decoder_error(DecoderError error);
     bool decode_and_queue_one_sample();
     void on_decode_timer();
 
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.cpp b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
index a41a6980fb..90f8160e04 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
@@ -77,15 +77,55 @@ DecoderErrorOr<void> Decoder::decode_frame(Span<u8 const> frame_data)
     }
     // 4. The output process as specified in section 8.9 is invoked.
-    // FIXME: Create a struct to store an output frame along with all information needed to display
-    //        it. This function will need to append the images to a vector to ensure that if a superframe
-    //        with multiple output frames is encountered, all of them can be displayed.
+    if (m_parser->m_show_frame)
+        TRY(create_video_frame());
 
     // 5. The reference frame update process as specified in section 8.10 is invoked.
     TRY(update_reference_frames());
 
     return {};
 }
 
+DecoderErrorOr<void> Decoder::create_video_frame()
+{
+    size_t decoded_y_width = m_parser->m_mi_cols * 8;
+    Gfx::Size<u32> output_y_size = {
+        m_parser->m_frame_width,
+        m_parser->m_frame_height,
+    };
+    auto decoded_uv_width = decoded_y_width >> m_parser->m_subsampling_x;
+    Gfx::Size<u32> output_uv_size = {
+        output_y_size.width() >> m_parser->m_subsampling_x,
+        output_y_size.height() >> m_parser->m_subsampling_y,
+    };
+    Array<FixedArray<u16>, 3> output_buffers = {
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_y_size.width() * output_y_size.height())),
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
+        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
+    };
+    for (u8 plane = 0; plane < 3; plane++) {
+        auto& buffer = output_buffers[plane];
+        auto decoded_width = plane == 0 ? decoded_y_width : decoded_uv_width;
+        auto output_size = plane == 0 ? output_y_size : output_uv_size;
+        auto const& decoded_buffer = get_output_buffer(plane);
+
+        for (u32 row = 0; row < output_size.height(); row++) {
+            memcpy(
+                buffer.data() + row * output_size.width(),
+                decoded_buffer.data() + row * decoded_width,
+                output_size.width() * sizeof(*buffer.data()));
+        }
+    }
+
+    auto frame = DECODER_TRY_ALLOC(adopt_nonnull_own_or_enomem(new (nothrow) SubsampledYUVFrame(
+        { output_y_size.width(), output_y_size.height() },
+        m_parser->m_bit_depth, get_cicp_color_space(),
+        m_parser->m_subsampling_x, m_parser->m_subsampling_y,
+        output_buffers[0], output_buffers[1], output_buffers[2])));
+    m_video_frame_queue.enqueue(move(frame));
+
+    return {};
+}
+
 inline size_t buffer_size(size_t width, size_t height)
 {
     return width * height;
@@ -186,40 +226,10 @@ inline CodingIndependentCodePoints Decoder::get_cicp_color_space()
 
 DecoderErrorOr<NonnullOwnPtr<VideoFrame>> Decoder::get_decoded_frame()
 {
-    size_t decoded_y_width = m_parser->m_mi_cols * 8;
-    Gfx::Size<u32> output_y_size = {
-        m_parser->m_frame_width,
-        m_parser->m_frame_height,
-    };
-    auto decoded_uv_width = decoded_y_width >> m_parser->m_subsampling_x;
-    Gfx::Size<u32> output_uv_size = {
-        output_y_size.width() >> m_parser->m_subsampling_x,
-        output_y_size.height() >> m_parser->m_subsampling_y,
-    };
-    Array<FixedArray<u16>, 3> output_buffers = {
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_y_size.width() * output_y_size.height())),
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
-        DECODER_TRY_ALLOC(FixedArray<u16>::try_create(output_uv_size.width() * output_uv_size.height())),
-    };
-    for (u8 plane = 0; plane < 3; plane++) {
-        auto& buffer = output_buffers[plane];
-        auto decoded_width = plane == 0 ? decoded_y_width : decoded_uv_width;
-        auto output_size = plane == 0 ? output_y_size : output_uv_size;
-        auto const& decoded_buffer = get_output_buffer(plane);
+    if (m_video_frame_queue.is_empty())
+        return DecoderError::format(DecoderErrorCategory::NeedsMoreInput, "No video frame in queue.");
 
-        for (u32 row = 0; row < output_size.height(); row++) {
-            memcpy(
-                buffer.data() + row * output_size.width(),
-                decoded_buffer.data() + row * decoded_width,
-                output_size.width() * sizeof(*buffer.data()));
-        }
-    }
-
-    return DECODER_TRY_ALLOC(adopt_nonnull_own_or_enomem(new (nothrow) SubsampledYUVFrame(
-        { output_y_size.width(), output_y_size.height() },
-        m_parser->m_bit_depth, get_cicp_color_space(),
-        m_parser->m_subsampling_x, m_parser->m_subsampling_y,
-        output_buffers[0], output_buffers[1], output_buffers[2])));
+    return m_video_frame_queue.dequeue();
 }
 
 u8 Decoder::merge_prob(u8 pre_prob, u8 count_0, u8 count_1, u8 count_sat, u8 max_update_factor)
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.h b/Userland/Libraries/LibVideo/VP9/Decoder.h
index 11e2671c36..41a9123f04 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.h
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.h
@@ -10,6 +10,7 @@
 #include <AK/Array.h>
 #include <AK/Error.h>
 #include <AK/FixedArray.h>
+#include <AK/Queue.h>
 #include <AK/Span.h>
 #include <AK/Vector.h>
 #include <LibVideo/Color/CodingIndependentCodePoints.h>
@@ -36,6 +37,7 @@ private:
     typedef i32 Intermediate;
 
     DecoderErrorOr<void> decode_frame(Span<u8 const>);
+    DecoderErrorOr<void> create_video_frame();
     DecoderErrorOr<void> allocate_buffers();
     Vector<Intermediate>& get_temp_buffer(u8 plane);
 
@@ -167,6 +169,8 @@ private:
         Vector<Intermediate> intermediate[3];
         Vector<u16> output[3];
     } m_buffers;
+
+    Queue<NonnullOwnPtr<VideoFrame>, 1> m_video_frame_queue;
 };
 
 }