
LibVideo/VP9: Use a struct for block context to keep between frames

There are three fields from FrameBlockContext that we need to keep between
frames; they are used when parsing those same fields for the next frame.
Zaggy1024 2022-11-20 12:42:17 -06:00 committed by Andreas Kling
parent 5275a1101e
commit 4a4aa697d9
4 changed files with 75 additions and 28 deletions
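
For orientation, here is a compact standalone sketch of the idea in the commit message, using simplified stand-ins for the LibVideo/VP9 types (the real definitions are in the hunks below; the motion_vectors field and the keep_segment_ids flag in main() are illustrative, not code from the tree): the rich per-frame block context is projected down to the handful of fields that must outlive the frame, and the segment ID is dropped unless the spec's conditions for keeping it hold.

#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the LibVideo/VP9 types used in the diff below.
struct MotionVectorPair {
    int vectors[2][2] {}; // [reference list][row/column component]
};

enum class ReferenceFrameType : uint8_t {
    None,
    LastFrame,
};

// Rich per-block state used while decoding the current frame.
struct FrameBlockContext {
    bool is_available { false };
    ReferenceFrameType ref_frames[2] { ReferenceFrameType::None, ReferenceFrameType::None };
    MotionVectorPair motion_vectors {};
    uint8_t segment_id { 0 };
    // ...plus many other fields that do not need to outlive the frame.
};

// Only the fields that the next frame's parser reads back.
struct PersistentBlockContext {
    PersistentBlockContext() = default;

    PersistentBlockContext(FrameBlockContext const& frame_context)
        : available(frame_context.is_available)
        , ref_frames { frame_context.ref_frames[0], frame_context.ref_frames[1] }
        , primary_motion_vector_pair(frame_context.motion_vectors)
        , segment_id(frame_context.segment_id)
    {
    }

    bool available { false };
    ReferenceFrameType ref_frames[2] { ReferenceFrameType::None, ReferenceFrameType::None };
    MotionVectorPair primary_motion_vector_pair {};
    uint8_t segment_id { 0 };
};

int main()
{
    FrameBlockContext current;
    current.is_available = true;
    current.segment_id = 3;

    // At the end of a frame, keep only what the next frame needs. Segment IDs
    // survive only when the spec's conditions hold (see update_reference_frames() below).
    bool keep_segment_ids = false;
    PersistentBlockContext kept(current);
    if (!keep_segment_ids)
        kept.segment_id = 0;

    std::printf("available=%d segment_id=%d\n", kept.available ? 1 : 0, kept.segment_id);
}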

View file

@@ -93,6 +93,39 @@ public:
         return m_storage[index_at(row, column)];
     }
 
+    template<typename OtherT>
+    ErrorOr<void> try_resize_to_match_other_vector2d(Vector2D<OtherT> const& other)
+    {
+        return try_resize(other.height(), other.width());
+    }
+
+    template<typename OtherT, typename Function>
+    void copy_to(Vector2D<OtherT>& other, Function function)
+    {
+        VERIFY(width() <= other.width());
+        VERIFY(height() <= other.height());
+        for (u32 row = 0; row < height(); row++) {
+            for (u32 column = 0; column < width(); column++)
+                other.at(row, column) = function(at(row, column));
+        }
+    }
+
+    void copy_to(Vector2D<T>& other)
+    {
+        VERIFY(width() <= other.width());
+        VERIFY(height() <= other.height());
+        for (u32 row = 0; row < height(); row++) {
+            auto other_index = other.index_at(row, 0);
+            AK::TypedTransfer<T>::copy(&other[other_index], &m_storage[index_at(row, 0)], width());
+        }
+    }
+
+    void reset()
+    {
+        for (size_t i = 0; i < size(); i++)
+            m_storage[i] = T();
+    }
+
 private:
     u32 m_height { 0 };
     u32 m_width { 0 };
@@ -124,4 +157,25 @@ struct FrameBlockContext
     u8 segment_id { 0 };
 };
+// Block context that is kept between frames until explicitly cleared.
+struct PersistentBlockContext {
+    PersistentBlockContext()
+        : available(false)
+    {
+    }
+
+    PersistentBlockContext(FrameBlockContext const& frame_context)
+        : available(frame_context.is_available)
+        , ref_frames(frame_context.ref_frames)
+        , primary_motion_vector_pair(frame_context.primary_motion_vector_pair())
+        , segment_id(frame_context.segment_id)
+    {
+    }
+
+    bool available { false };
+    ReferenceFramePair ref_frames { ReferenceFrameType::None, ReferenceFrameType::None };
+    MotionVectorPair primary_motion_vector_pair {};
+    u8 segment_id { 0 };
+};
+
 }
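
The helpers above are generic: copy_to() with a conversion function is what the later update_reference_frames() hunk uses to turn each FrameBlockContext into a PersistentBlockContext. Here is a rough standalone sketch of the same container pattern; Grid, resize() and resize_to_match() are hypothetical stand-ins, not the actual Vector2D API (the real class uses try_resize()/try_resize_to_match_other_vector2d() returning ErrorOr, and AK::TypedTransfer for the plain copy):

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for Vector2D<T>: row-major 2D storage with the same
// three helpers added above (resize to match, converting copy, reset).
template<typename T>
class Grid {
public:
    size_t height() const { return m_height; }
    size_t width() const { return m_width; }

    void resize(size_t height, size_t width)
    {
        m_height = height;
        m_width = width;
        m_storage.assign(height * width, T());
    }

    template<typename OtherT>
    void resize_to_match(Grid<OtherT> const& other) { resize(other.height(), other.width()); }

    T& at(size_t row, size_t column) { return m_storage[row * m_width + column]; }
    T const& at(size_t row, size_t column) const { return m_storage[row * m_width + column]; }

    // Copy every element into `other`, converting it with `function` on the way.
    template<typename OtherT, typename Function>
    void copy_to(Grid<OtherT>& other, Function function) const
    {
        for (size_t row = 0; row < height(); row++) {
            for (size_t column = 0; column < width(); column++)
                other.at(row, column) = function(at(row, column));
        }
    }

    // Return every element to a default-constructed value.
    void reset()
    {
        for (auto& element : m_storage)
            element = T();
    }

private:
    size_t m_height { 0 };
    size_t m_width { 0 };
    std::vector<T> m_storage;
};

int main()
{
    Grid<int> current;
    current.resize(2, 3);
    current.at(1, 2) = 42;

    // Mirrors how update_reference_frames() uses the new helpers: size the
    // persistent store to match the frame store, then copy with a per-element conversion.
    Grid<double> previous;
    previous.resize_to_match(current);
    current.copy_to(previous, [](int value) { return value / 2.0; });
    std::printf("%.1f\n", previous.at(1, 2)); // prints 21.0

    // setup_past_independence() calls the equivalent of reset() to clear the kept contexts.
    previous.reset();
}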

View file

@@ -66,14 +66,7 @@ DecoderErrorOr<void> Decoder::decode_frame(ReadonlyBytes frame_data)
     //     show_existing_frame is equal to 0,
     //     segmentation_enabled is equal to 1,
     //     segmentation_update_map is equal to 1.
-    if (!m_parser->m_show_existing_frame && m_parser->m_segmentation_enabled && m_parser->m_segmentation_update_map) {
-        for (auto row = 0u; row < m_parser->m_mi_rows; row++) {
-            for (auto column = 0u; column < m_parser->m_mi_cols; column++) {
-                auto index = index_from_row_and_column(row, column, m_parser->m_mi_rows);
-                m_parser->m_prev_segment_ids[index] = m_parser->m_frame_block_contexts[index].segment_id;
-            }
-        }
-    }
+    // This is handled by update_reference_frames.
 
     // 4. The output process as specified in section 8.9 is invoked.
     if (m_parser->m_show_frame)
@@ -1808,21 +1801,24 @@ DecoderErrorOr<void> Decoder::update_reference_frames()
     // 2. If show_existing_frame is equal to 0, the following applies:
     if (!m_parser->m_show_existing_frame) {
+        DECODER_TRY_ALLOC(m_parser->m_previous_block_contexts.try_resize_to_match_other_vector2d(m_parser->m_frame_block_contexts));
         // PrevRefFrames[ row ][ col ][ list ] is set equal to RefFrames[ row ][ col ][ list ] for row = 0..MiRows-1,
         // for col = 0..MiCols-1, for list = 0..1.
         // PrevMvs[ row ][ col ][ list ][ comp ] is set equal to Mvs[ row ][ col ][ list ][ comp ] for row = 0..MiRows-1,
         // for col = 0..MiCols-1, for list = 0..1, for comp = 0..1.
-        size_t size = m_parser->m_frame_block_contexts.width() * m_parser->m_frame_block_contexts.height();
-        m_parser->m_prev_ref_frames.resize_and_keep_capacity(size);
-        m_parser->m_prev_mvs.resize_and_keep_capacity(size);
-        for (u32 row = 0; row < m_parser->m_frame_block_contexts.height(); row++) {
-            for (u32 column = 0; column < m_parser->m_frame_block_contexts.width(); column++) {
-                auto index = m_parser->m_frame_block_contexts.index_at(row, column);
-                auto context = m_parser->m_frame_block_contexts[index];
-                m_parser->m_prev_ref_frames[index] = context.ref_frames;
-                m_parser->m_prev_mvs[index] = context.primary_motion_vector_pair();
-            }
-        }
+        // And from decode_frame():
+        // - If all of the following conditions are true, PrevSegmentIds[ row ][ col ] is set equal to
+        //   SegmentIds[ row ][ col ] for row = 0..MiRows-1, for col = 0..MiCols-1:
+        //   show_existing_frame is equal to 0,
+        //   segmentation_enabled is equal to 1,
+        //   segmentation_update_map is equal to 1.
+        bool keep_segment_ids = !m_parser->m_show_existing_frame && m_parser->m_segmentation_enabled && m_parser->m_segmentation_update_map;
+        m_parser->m_frame_block_contexts.copy_to(m_parser->m_previous_block_contexts, [keep_segment_ids](FrameBlockContext context) {
+            auto persistent_context = PersistentBlockContext(context);
+            if (!keep_segment_ids)
+                persistent_context.segment_id = 0;
+            return persistent_context;
+        });
     }
 
     return {};

View file

@@ -494,9 +494,8 @@ void Parser::setup_past_independence()
             m_feature_enabled[i][j] = false;
         }
     }
+    m_previous_block_contexts.reset();
     m_segmentation_abs_or_delta_update = false;
-    m_prev_segment_ids.clear_with_capacity();
-    m_prev_segment_ids.resize_and_keep_capacity(m_mi_rows * m_mi_cols);
     m_loop_filter_delta_enabled = true;
     m_loop_filter_ref_deltas[IntraFrame] = 1;
     m_loop_filter_ref_deltas[LastFrame] = 0;
@@ -1111,7 +1110,7 @@ u8 Parser::get_segment_id()
     u8 segment = 7;
     for (size_t y = 0; y < ymis; y++) {
         for (size_t x = 0; x < xmis; x++) {
-            segment = min(segment, m_prev_segment_ids[get_image_index(m_mi_row + y, m_mi_col + x)]);
+            segment = min(segment, m_previous_block_contexts.at(m_mi_row + y, m_mi_col + x).segment_id);
         }
     }
     return segment;
@@ -1498,10 +1497,10 @@ void Parser::add_mv_ref_list(u8 ref_list)
 void Parser::get_block_mv(u32 candidate_row, u32 candidate_column, u8 ref_list, bool use_prev)
 {
-    auto index = get_image_index(candidate_row, candidate_column);
     if (use_prev) {
-        m_candidate_mv[ref_list] = m_prev_mvs[index][ref_list];
-        m_candidate_frame[ref_list] = m_prev_ref_frames[index][ref_list];
+        auto const& prev_context = m_previous_block_contexts.at(candidate_row, candidate_column);
+        m_candidate_mv[ref_list] = prev_context.primary_motion_vector_pair[ref_list];
+        m_candidate_frame[ref_list] = prev_context.ref_frames[ref_list];
     } else {
         auto const& current_context = m_frame_block_contexts.at(candidate_row, candidate_column);
         m_candidate_mv[ref_list] = current_context.primary_motion_vector_pair()[ref_list];

View file

@@ -259,7 +259,6 @@ private:
     ReferenceFramePair m_comp_var_ref;
     // FIXME: Use Array<MotionVectorPair, 4> instead.
    Array<Array<MotionVector, 4>, 2> m_block_mvs;
-    Vector<u8> m_prev_segment_ids;
 
     // FIXME: From spec: NOTE We are using a 2D array to store the SubModes for clarity. It is possible to reduce memory
     // consumption by only storing one intra mode for each 8x8 horizontal and vertical position, i.e. to use two 1D
@@ -273,8 +272,7 @@ private:
     u8 m_ref_mv_count { 0 };
     MotionVectorPair m_ref_list_mv;
     bool m_use_prev_frame_mvs;
-    Vector<ReferenceFramePair> m_prev_ref_frames;
-    Vector<MotionVectorPair> m_prev_mvs;
+    Vector2D<PersistentBlockContext> m_previous_block_contexts;
     // Indexed by ReferenceFrame enum.
     u8 m_mode_context[4] { INVALID_CASE };