LibVideo/VP9: Index inter-frame references with named fields or an enum

Previously, we were using size_t, often coerced from bool or u8, to
index reference pairs. Now, they must either be taken directly from
named fields or indexed using the `ReferenceIndex` enum with options
`primary` and `secondary`. With a more explicit method of indexing
these, the compiler can aid in using reference pairs correctly, and
fuzzers may be able to detect undefined behavior more easily.
Zaggy1024 2022-11-25 03:03:22 -06:00 committed by Andreas Kling
parent 3af4deba6d
commit f4761dab09
8 changed files with 174 additions and 157 deletions
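To make the new indexing concrete, a minimal call-site sketch (the variables here are hypothetical; `MotionVectorPair` and `ReferenceIndex` match the definitions in the diffs below):

    // Before: a pair was indexed with a size_t, often coerced from a bool or u8,
    // and a bad index was only caught at runtime by VERIFY_NOT_REACHED():
    //     auto& vector = motion_vectors[use_secondary]; // bool silently widened
    // After: a pair can only be indexed through its named fields or the enum:
    MotionVectorPair motion_vectors;
    motion_vectors.primary = {};                    // direct named-field access
    motion_vectors[ReferenceIndex::Secondary] = {}; // enum-typed indexing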


@@ -19,27 +19,30 @@
namespace Video::VP9 {
template<typename T>
struct Pair {
T a;
T b;
struct ReferencePair {
T primary;
T secondary;
T& operator[](u8 index)
T& operator[](ReferenceIndex index)
{
if (index == 0)
return a;
if (index == 1)
return b;
VERIFY_NOT_REACHED();
switch (index) {
case ReferenceIndex::Primary:
return primary;
case ReferenceIndex::Secondary:
return secondary;
default:
VERIFY_NOT_REACHED();
}
}
T const& operator[](u8 index) const
T const& operator[](ReferenceIndex index) const
{
return const_cast<Pair<T>&>(*this)[index];
return const_cast<ReferencePair<T>&>(*this)[index];
}
};
typedef Pair<ReferenceFrameType> ReferenceFramePair;
typedef Pair<MotionVector> MotionVectorPair;
typedef ReferencePair<ReferenceFrameType> ReferenceFramePair;
typedef ReferencePair<MotionVector> MotionVectorPair;
template<typename T>
class Vector2D;
@@ -201,8 +204,8 @@ struct TokensContext {
// Block context that is kept for the lifetime of a frame.
struct FrameBlockContext {
bool is_intra_predicted() const { return ref_frames[0] == ReferenceFrameType::None; }
bool is_single_reference() const { return ref_frames[1] == ReferenceFrameType::None; }
bool is_intra_predicted() const { return ref_frames.primary == ReferenceFrameType::None; }
bool is_single_reference() const { return ref_frames.secondary == ReferenceFrameType::None; }
MotionVectorPair primary_motion_vector_pair() const { return sub_block_motion_vectors[3]; }
bool is_available { false };
@@ -410,8 +413,8 @@ struct BlockContext {
TXSize tx_size { TXSize::TX_4x4 };
ReferenceFramePair reference_frame_types;
bool is_inter_predicted() const { return reference_frame_types[0] > ReferenceFrameType::None; }
bool is_compound() const { return reference_frame_types[1] > ReferenceFrameType::None; }
bool is_inter_predicted() const { return reference_frame_types.primary != ReferenceFrameType::None; }
bool is_compound() const { return reference_frame_types.secondary != ReferenceFrameType::None; }
Array<PredictionMode, 4> sub_block_prediction_modes;
PredictionMode y_prediction_mode() const { return sub_block_prediction_modes.last(); }
@@ -428,7 +431,7 @@ struct BlockMotionVectorCandidateSet {
MotionVector best_vector;
};
using BlockMotionVectorCandidates = Pair<BlockMotionVectorCandidateSet>;
using BlockMotionVectorCandidates = ReferencePair<BlockMotionVectorCandidateSet>;
struct MotionVectorCandidate {
ReferenceFrameType type;


@@ -671,7 +671,7 @@ DecoderErrorOr<void> Decoder::predict_intra(u8 plane, BlockContext const& block_
return {};
}
MotionVector Decoder::select_motion_vector(u8 plane, BlockContext const& block_context, u8 ref_list, u32 block_index)
MotionVector Decoder::select_motion_vector(u8 plane, BlockContext const& block_context, ReferenceIndex reference_index, u32 block_index)
{
// The inputs to this process are:
// a variable plane specifying which plane is being predicted,
@@ -706,27 +706,27 @@ MotionVector Decoder::select_motion_vector(u8 plane, BlockContext const& block_c
// If plane is equal to 0, or MiSize is greater than or equal to BLOCK_8X8, mv is set equal to
// BlockMvs[ refList ][ blockIdx ].
if (plane == 0 || block_context.size >= Block_8x8)
return vectors[block_index][ref_list];
return vectors[block_index][reference_index];
// Otherwise, if subsampling_x is equal to 0 and subsampling_y is equal to 0, mv is set equal to
// BlockMvs[ refList ][ blockIdx ].
if (!block_context.frame_context.color_config.subsampling_x && !block_context.frame_context.color_config.subsampling_y)
return vectors[block_index][ref_list];
return vectors[block_index][reference_index];
// Otherwise, if subsampling_x is equal to 0 and subsampling_y is equal to 1, mv[ comp ] is set equal to
// round_mv_comp_q2( BlockMvs[ refList ][ blockIdx ][ comp ] + BlockMvs[ refList ][ blockIdx + 2 ][ comp ] )
// for comp = 0..1.
if (!block_context.frame_context.color_config.subsampling_x && block_context.frame_context.color_config.subsampling_y)
return round_mv_comp_q2(vectors[block_index][ref_list] + vectors[block_index + 2][ref_list]);
return round_mv_comp_q2(vectors[block_index][reference_index] + vectors[block_index + 2][reference_index]);
// Otherwise, if subsampling_x is equal to 1 and subsampling_y is equal to 0, mv[ comp ] is set equal to
// round_mv_comp_q2( BlockMvs[ refList ][ blockIdx ][ comp ] + BlockMvs[ refList ][ blockIdx + 1 ][ comp ] )
// for comp = 0..1.
if (block_context.frame_context.color_config.subsampling_x && !block_context.frame_context.color_config.subsampling_y)
return round_mv_comp_q2(vectors[block_index][ref_list] + vectors[block_index + 1][ref_list]);
return round_mv_comp_q2(vectors[block_index][reference_index] + vectors[block_index + 1][reference_index]);
// Otherwise, (subsampling_x is equal to 1 and subsampling_y is equal to 1), mv[ comp ] is set equal to
// round_mv_comp_q4( BlockMvs[ refList ][ 0 ][ comp ] + BlockMvs[ refList ][ 1 ][ comp ] +
// BlockMvs[ refList ][ 2 ][ comp ] + BlockMvs[ refList ][ 3 ][ comp ] ) for comp = 0..1.
VERIFY(block_context.frame_context.color_config.subsampling_x && block_context.frame_context.color_config.subsampling_y);
return round_mv_comp_q4(vectors[0][ref_list] + vectors[1][ref_list]
+ vectors[2][ref_list] + vectors[3][ref_list]);
return round_mv_comp_q4(vectors[0][reference_index] + vectors[1][reference_index]
+ vectors[2][reference_index] + vectors[3][reference_index]);
}
MotionVector Decoder::clamp_motion_vector(u8 plane, BlockContext const& block_context, u32 block_row, u32 block_column, MotionVector vector)
@@ -761,12 +761,12 @@ MotionVector Decoder::clamp_motion_vector(u8 plane, BlockContext const& block_co
};
}
DecoderErrorOr<void> Decoder::predict_inter_block(u8 plane, BlockContext const& block_context, u8 ref_list, u32 block_row, u32 block_column, u32 x, u32 y, u32 width, u32 height, u32 block_index, Span<u16> block_buffer)
DecoderErrorOr<void> Decoder::predict_inter_block(u8 plane, BlockContext const& block_context, ReferenceIndex reference_index, u32 block_row, u32 block_column, u32 x, u32 y, u32 width, u32 height, u32 block_index, Span<u16> block_buffer)
{
VERIFY(width <= maximum_block_dimensions && height <= maximum_block_dimensions);
// 2. The motion vector selection process in section 8.5.2.1 is invoked with plane, refList, blockIdx as inputs
// and the output being the motion vector mv.
auto motion_vector = select_motion_vector(plane, block_context, ref_list, block_index);
auto motion_vector = select_motion_vector(plane, block_context, reference_index, block_index);
// 3. The motion vector clamping process in section 8.5.2.2 is invoked with plane, mv as inputs and the output
// being the clamped motion vector clampedMv
@@ -789,7 +789,7 @@ DecoderErrorOr<void> Decoder::predict_inter_block(u8 plane, BlockContext const&
// A variable refIdx specifying which reference frame is being used is set equal to
// ref_frame_idx[ ref_frame[ refList ] - LAST_FRAME ].
auto reference_frame_index = block_context.frame_context.reference_frame_indices[block_context.reference_frame_types[ref_list] - LastFrame];
auto reference_frame_index = block_context.frame_context.reference_frame_indices[block_context.reference_frame_types[reference_index] - LastFrame];
// It is a requirement of bitstream conformance that all the following conditions are satisfied:
// 2 * FrameWidth >= RefFrameWidth[ refIdx ]
@@ -943,14 +943,12 @@ DecoderErrorOr<void> Decoder::predict_inter(u8 plane, BlockContext const& block_
// a variable blockIdx, specifying how much of the block has already been predicted in units of 4x4 samples.
// The outputs of this process are inter predicted samples in the current frame CurrFrame.
// The variable isCompound is set equal to ref_frame[ 1 ] > NONE.
auto is_compound = block_context.reference_frame_types[1] > None;
// The prediction arrays are formed by the following ordered steps:
// 1. The variable refList is set equal to 0.
// 2. through 5.
Array<u16, maximum_block_size> predicted_buffer;
auto predicted_span = predicted_buffer.span().trim(width * height);
TRY(predict_inter_block(plane, block_context, 0, block_context.row, block_context.column, x, y, width, height, block_index, predicted_span));
TRY(predict_inter_block(plane, block_context, ReferenceIndex::Primary, block_context.row, block_context.column, x, y, width, height, block_index, predicted_span));
auto predicted_buffer_at = [&](Span<u16> buffer, u32 row, u32 column) -> u16& {
return buffer[row * width + column];
};
@@ -969,9 +967,10 @@ DecoderErrorOr<void> Decoder::predict_inter(u8 plane, BlockContext const& block_
auto width_in_frame_buffer = min(width, frame_width - x);
auto height_in_frame_buffer = min(height, frame_height - y);
// The variable isCompound is set equal to ref_frame[ 1 ] > NONE.
// If isCompound is equal to 0, CurrFrame[ plane ][ y + i ][ x + j ] is set equal to preds[ 0 ][ i ][ j ] for i = 0..h-1
// and j = 0..w-1.
if (!is_compound) {
if (!block_context.is_compound()) {
for (auto i = 0u; i < height_in_frame_buffer; i++) {
for (auto j = 0u; j < width_in_frame_buffer; j++)
frame_buffer_at(y + i, x + j) = predicted_buffer_at(predicted_span, i, j);
@@ -984,7 +983,7 @@ DecoderErrorOr<void> Decoder::predict_inter(u8 plane, BlockContext const& block_
// for i = 0..h-1 and j = 0..w-1.
Array<u16, maximum_block_size> second_predicted_buffer;
auto second_predicted_span = second_predicted_buffer.span().trim(width * height);
TRY(predict_inter_block(plane, block_context, 1, block_context.row, block_context.column, x, y, width, height, block_index, second_predicted_span));
TRY(predict_inter_block(plane, block_context, ReferenceIndex::Secondary, block_context.row, block_context.column, x, y, width, height, block_index, second_predicted_span));
for (auto i = 0u; i < height_in_frame_buffer; i++) {
for (auto j = 0u; j < width_in_frame_buffer; j++)


@@ -63,13 +63,13 @@ private:
// (8.5.1) Inter prediction process
DecoderErrorOr<void> predict_inter(u8 plane, BlockContext const& block_context, u32 x, u32 y, u32 width, u32 height, u32 block_index);
// (8.5.2.1) Motion vector selection process
MotionVector select_motion_vector(u8 plane, BlockContext const&, u8 ref_list, u32 block_index);
MotionVector select_motion_vector(u8 plane, BlockContext const&, ReferenceIndex, u32 block_index);
// (8.5.2.2) Motion vector clamping process
MotionVector clamp_motion_vector(u8 plane, BlockContext const&, u32 block_row, u32 block_column, MotionVector vector);
// (8.5.2.3) Motion vector scaling process
DecoderErrorOr<MotionVector> scale_motion_vector(u8 plane, u8 ref_list, u32 x, u32 y, MotionVector vector);
DecoderErrorOr<MotionVector> scale_motion_vector(u8 plane, ReferenceIndex, u32 x, u32 y, MotionVector vector);
// From (8.5.1) Inter prediction process, steps 2-5
DecoderErrorOr<void> predict_inter_block(u8 plane, BlockContext const&, u8 ref_list, u32 block_row, u32 block_column, u32 x, u32 y, u32 width, u32 height, u32 block_index, Span<u16> block_buffer);
DecoderErrorOr<void> predict_inter_block(u8 plane, BlockContext const&, ReferenceIndex, u32 block_row, u32 block_column, u32 x, u32 y, u32 width, u32 height, u32 block_index, Span<u16> block_buffer);
/* (8.6) Reconstruction and Dequantization */


@@ -66,6 +66,11 @@ enum ReferenceMode : u8 {
ReferenceModeSelect = 2,
};
enum class ReferenceIndex : u8 {
Primary = 0,
Secondary = 1,
};
enum BlockSubsize : u8 {
Block_4x4 = 0,
Block_4x8 = 1,


@@ -1151,23 +1151,25 @@ DecoderErrorOr<void> Parser::intra_block_mode_info(BlockContext& block_context)
return {};
}
static void select_best_reference_motion_vectors(BlockContext& block_context, MotionVectorPair reference_motion_vectors, BlockMotionVectorCandidates& candidates, u8 ref_list);
static void select_best_reference_motion_vectors(BlockContext& block_context, MotionVectorPair reference_motion_vectors, BlockMotionVectorCandidates& candidates, ReferenceIndex);
DecoderErrorOr<void> Parser::inter_block_mode_info(BlockContext& block_context, FrameBlockContext above_context, FrameBlockContext left_context)
{
TRY(read_ref_frames(block_context, above_context, left_context));
VERIFY(block_context.is_inter_predicted());
BlockMotionVectorCandidates motion_vector_candidates;
for (auto j = 0; j < 2; j++) {
if (block_context.reference_frame_types[j] > IntraFrame) {
auto reference_motion_vectors = find_reference_motion_vectors(block_context, block_context.reference_frame_types[j], -1);
select_best_reference_motion_vectors(block_context, reference_motion_vectors, motion_vector_candidates, j);
}
auto reference_motion_vectors = find_reference_motion_vectors(block_context, block_context.reference_frame_types.primary, -1);
select_best_reference_motion_vectors(block_context, reference_motion_vectors, motion_vector_candidates, ReferenceIndex::Primary);
if (block_context.is_compound()) {
auto reference_motion_vectors = find_reference_motion_vectors(block_context, block_context.reference_frame_types.secondary, -1);
select_best_reference_motion_vectors(block_context, reference_motion_vectors, motion_vector_candidates, ReferenceIndex::Secondary);
}
if (seg_feature_active(block_context, SEG_LVL_SKIP)) {
block_context.y_prediction_mode() = PredictionMode::ZeroMv;
} else if (block_context.size >= Block_8x8) {
block_context.y_prediction_mode() = TRY_READ(TreeParser::parse_inter_mode(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, m_mode_context[block_context.reference_frame_types[0]]));
block_context.y_prediction_mode() = TRY_READ(TreeParser::parse_inter_mode(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, m_mode_context[block_context.reference_frame_types.primary]));
}
if (block_context.frame_context.interpolation_filter == Switchable)
block_context.interpolation_filter = TRY_READ(TreeParser::parse_interpolation_filter(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, above_context, left_context));
@@ -1177,10 +1179,11 @@ DecoderErrorOr<void> Parser::inter_block_mode_info(BlockContext& block_context,
auto size_in_4x4_blocks = block_context.get_size_in_4x4_blocks();
for (auto idy = 0; idy < 2; idy += size_in_4x4_blocks.height()) {
for (auto idx = 0; idx < 2; idx += size_in_4x4_blocks.width()) {
block_context.y_prediction_mode() = TRY_READ(TreeParser::parse_inter_mode(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, m_mode_context[block_context.reference_frame_types[0]]));
block_context.y_prediction_mode() = TRY_READ(TreeParser::parse_inter_mode(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, m_mode_context[block_context.reference_frame_types.primary]));
if (block_context.y_prediction_mode() == PredictionMode::NearestMv || block_context.y_prediction_mode() == PredictionMode::NearMv) {
for (auto j = 0; j < 1 + block_context.is_compound(); j++)
select_best_sub_block_reference_motion_vectors(block_context, motion_vector_candidates, idy * 2 + idx, j);
select_best_sub_block_reference_motion_vectors(block_context, motion_vector_candidates, idy * 2 + idx, ReferenceIndex::Primary);
if (block_context.is_compound())
select_best_sub_block_reference_motion_vectors(block_context, motion_vector_candidates, idy * 2 + idx, ReferenceIndex::Secondary);
}
auto new_motion_vector_pair = TRY(get_motion_vector(block_context, motion_vector_candidates));
for (auto y = 0; y < size_in_4x4_blocks.height(); y++) {
@@ -1211,27 +1214,28 @@ DecoderErrorOr<void> Parser::read_ref_frames(BlockContext& block_context, FrameB
if (compound_mode == ReferenceModeSelect)
compound_mode = TRY_READ(TreeParser::parse_comp_mode(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, fixed_reference, above_context, left_context));
if (compound_mode == CompoundReference) {
auto secondary_reference = block_context.frame_context.variable_reference_types;
auto fixed_reference_index = block_context.frame_context.reference_frame_sign_biases[fixed_reference];
auto variable_reference_index = !fixed_reference_index;
auto variable_references = block_context.frame_context.variable_reference_types;
// FIXME: Create an enum for compound frame references using names Primary and Secondary.
auto secondary_reference_selection = TRY_READ(TreeParser::parse_comp_ref(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, fixed_reference, secondary_reference, variable_reference_index, above_context, left_context));
auto fixed_reference_index = ReferenceIndex::Primary;
auto variable_reference_index = ReferenceIndex::Secondary;
if (block_context.frame_context.reference_frame_sign_biases[fixed_reference])
swap(fixed_reference_index, variable_reference_index);
auto variable_reference_selection = TRY_READ(TreeParser::parse_comp_ref(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, fixed_reference, variable_references, variable_reference_index, above_context, left_context));
block_context.reference_frame_types[fixed_reference_index] = fixed_reference;
block_context.reference_frame_types[variable_reference_index] = secondary_reference[secondary_reference_selection];
block_context.reference_frame_types[variable_reference_index] = variable_references[variable_reference_selection];
return {};
}
// FIXME: Maybe consolidate this into a tree. Context is different between part 1 and 2 but still, it would look nice here.
ReferenceFrameType primary_type = ReferenceFrameType::LastFrame;
auto single_ref_p1 = TRY_READ(TreeParser::parse_single_ref_part_1(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, above_context, left_context));
if (single_ref_p1) {
auto single_ref_p2 = TRY_READ(TreeParser::parse_single_ref_part_2(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, above_context, left_context));
block_context.reference_frame_types[0] = single_ref_p2 ? AltRefFrame : GoldenFrame;
} else {
block_context.reference_frame_types[0] = LastFrame;
primary_type = single_ref_p2 ? ReferenceFrameType::AltRefFrame : ReferenceFrameType::GoldenFrame;
}
block_context.reference_frame_types[1] = None;
block_context.reference_frame_types = { primary_type, ReferenceFrameType::None };
return {};
}
@@ -1239,22 +1243,26 @@ DecoderErrorOr<void> Parser::read_ref_frames(BlockContext& block_context, FrameB
DecoderErrorOr<MotionVectorPair> Parser::get_motion_vector(BlockContext const& block_context, BlockMotionVectorCandidates const& candidates)
{
MotionVectorPair result;
for (auto i = 0; i < 1 + block_context.is_compound(); i++) {
auto read_one = [&](ReferenceIndex index) -> DecoderErrorOr<void> {
switch (block_context.y_prediction_mode()) {
case PredictionMode::NewMv:
result[i] = TRY(read_motion_vector(block_context, candidates, i));
result[index] = TRY(read_motion_vector(block_context, candidates, index));
break;
case PredictionMode::NearestMv:
result[i] = candidates[i].nearest_vector;
result[index] = candidates[index].nearest_vector;
break;
case PredictionMode::NearMv:
result[i] = candidates[i].near_vector;
result[index] = candidates[index].near_vector;
break;
default:
result[i] = {};
result[index] = {};
break;
}
}
return {};
};
TRY(read_one(ReferenceIndex::Primary));
if (block_context.is_compound())
TRY(read_one(ReferenceIndex::Secondary));
return result;
}
@@ -1265,7 +1273,7 @@ static bool should_use_high_precision_motion_vector(MotionVector const& delta_ve
}
// read_mv( ref ) in the spec.
DecoderErrorOr<MotionVector> Parser::read_motion_vector(BlockContext const& block_context, BlockMotionVectorCandidates const& candidates, u8 reference_index)
DecoderErrorOr<MotionVector> Parser::read_motion_vector(BlockContext const& block_context, BlockMotionVectorCandidates const& candidates, ReferenceIndex reference_index)
{
m_use_hp = block_context.frame_context.high_precision_motion_vectors_allowed && should_use_high_precision_motion_vector(candidates[reference_index].best_vector);
MotionVector diff_mv;
@@ -1503,22 +1511,22 @@ static void add_motion_vector_to_list_deduped(MotionVector const& vector, Vector
}
// get_block_mv( candidateR, candidateC, refList, usePrev ) in the spec.
MotionVectorCandidate Parser::get_motion_vector_from_current_or_previous_frame(BlockContext const& block_context, MotionVector candidate_vector, u8 ref_list, bool use_prev)
MotionVectorCandidate Parser::get_motion_vector_from_current_or_previous_frame(BlockContext const& block_context, MotionVector candidate_vector, ReferenceIndex reference_index, bool use_prev)
{
if (use_prev) {
auto const& prev_context = m_previous_block_contexts.at(candidate_vector.row(), candidate_vector.column());
return { prev_context.ref_frames[ref_list], prev_context.primary_motion_vector_pair[ref_list] };
return { prev_context.ref_frames[reference_index], prev_context.primary_motion_vector_pair[reference_index] };
}
auto const& current_context = block_context.frame_block_contexts().at(candidate_vector.row(), candidate_vector.column());
return { current_context.ref_frames[ref_list], current_context.primary_motion_vector_pair()[ref_list] };
return { current_context.ref_frames[reference_index], current_context.primary_motion_vector_pair()[reference_index] };
}
// if_same_ref_frame_add_mv( candidateR, candidateC, refFrame, usePrev ) in the spec.
void Parser::add_motion_vector_if_reference_frame_type_is_same(BlockContext const& block_context, MotionVector candidate_vector, ReferenceFrameType ref_frame, Vector<MotionVector, 2>& list, bool use_prev)
{
for (auto ref_list = 0u; ref_list < 2; ref_list++) {
auto candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, ref_list, use_prev);
for (auto i = 0u; i < 2; i++) {
auto candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, static_cast<ReferenceIndex>(i), use_prev);
if (candidate.type == ref_frame) {
add_motion_vector_to_list_deduped(candidate.vector, list);
return;
@@ -1536,13 +1544,13 @@ static void apply_sign_bias_to_motion_vector(FrameContext const& frame_context,
// if_diff_ref_frame_add_mv( candidateR, candidateC, refFrame, usePrev ) in the spec.
void Parser::add_motion_vector_if_reference_frame_type_is_different(BlockContext const& block_context, MotionVector candidate_vector, ReferenceFrameType ref_frame, Vector<MotionVector, 2>& list, bool use_prev)
{
auto first_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, 0, use_prev);
auto first_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, ReferenceIndex::Primary, use_prev);
if (first_candidate.type > ReferenceFrameType::IntraFrame && first_candidate.type != ref_frame) {
apply_sign_bias_to_motion_vector(block_context.frame_context, first_candidate, ref_frame);
add_motion_vector_to_list_deduped(first_candidate.vector, list);
}
auto second_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, 1, use_prev);
auto second_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, ReferenceIndex::Secondary, use_prev);
auto mvs_are_same = first_candidate.vector == second_candidate.vector;
if (second_candidate.type > ReferenceFrameType::IntraFrame && second_candidate.type != ref_frame && !mvs_are_same) {
apply_sign_bias_to_motion_vector(block_context.frame_context, second_candidate, ref_frame);
@@ -1588,8 +1596,9 @@ MotionVectorPair Parser::find_reference_motion_vectors(BlockContext const& block
auto context = block_context.frame_block_contexts().at(candidate.row(), candidate.column());
context_counter += mode_2_counter[to_underlying(context.y_mode)];
for (auto ref_list = 0u; ref_list < 2; ref_list++) {
if (context.ref_frames[ref_list] == reference_frame) {
for (auto i = 0u; i < 2; i++) {
auto reference_index = static_cast<ReferenceIndex>(i);
if (context.ref_frames[reference_index] == reference_frame) {
// This section up until add_mv_ref_list() is defined in spec as get_sub_block_mv().
constexpr u8 idx_n_column_to_subblock[4][2] = {
{ 1, 2 },
@@ -1599,7 +1608,7 @@ MotionVectorPair Parser::find_reference_motion_vectors(BlockContext const& block
};
auto index = block >= 0 ? idx_n_column_to_subblock[block][offset_vector.column() == 0] : 3;
add_motion_vector_to_list_deduped(context.sub_block_motion_vectors[index][ref_list], list);
add_motion_vector_to_list_deduped(context.sub_block_motion_vectors[index][reference_index], list);
break;
}
}
@@ -1634,41 +1643,42 @@ MotionVectorPair Parser::find_reference_motion_vectors(BlockContext const& block
MotionVectorPair result;
for (auto i = 0u; i < list.size(); i++)
result[i] = list[i];
result[static_cast<ReferenceIndex>(i)] = list[i];
return result;
}
// find_best_ref_mvs( refList ) in the spec.
static void select_best_reference_motion_vectors(BlockContext& block_context, MotionVectorPair reference_motion_vectors, BlockMotionVectorCandidates& candidates, u8 reference_index)
static void select_best_reference_motion_vectors(BlockContext& block_context, MotionVectorPair reference_motion_vectors, BlockMotionVectorCandidates& candidates, ReferenceIndex reference_index)
{
for (auto i = 0u; i < MAX_MV_REF_CANDIDATES; i++) {
auto delta = reference_motion_vectors[i];
auto delta_row = delta.row();
auto delta_column = delta.column();
if (!block_context.frame_context.high_precision_motion_vectors_allowed || !should_use_high_precision_motion_vector(delta)) {
auto adjust_and_clamp_vector = [&](MotionVector& vector) {
auto delta_row = vector.row();
auto delta_column = vector.column();
if (!block_context.frame_context.high_precision_motion_vectors_allowed || !should_use_high_precision_motion_vector(vector)) {
if ((delta_row & 1) != 0)
delta_row += delta_row > 0 ? -1 : 1;
if ((delta_column & 1) != 0)
delta_column += delta_column > 0 ? -1 : 1;
}
delta = { delta_row, delta_column };
reference_motion_vectors[i] = clamp_motion_vector(block_context, delta, (BORDERINPIXELS - INTERP_EXTEND) << 3);
}
vector = { delta_row, delta_column };
vector = clamp_motion_vector(block_context, vector, (BORDERINPIXELS - INTERP_EXTEND) << 3);
};
adjust_and_clamp_vector(reference_motion_vectors.primary);
adjust_and_clamp_vector(reference_motion_vectors.secondary);
candidates[reference_index].nearest_vector = reference_motion_vectors[0];
candidates[reference_index].near_vector = reference_motion_vectors[1];
candidates[reference_index].best_vector = reference_motion_vectors[0];
candidates[reference_index].nearest_vector = reference_motion_vectors.primary;
candidates[reference_index].near_vector = reference_motion_vectors.secondary;
candidates[reference_index].best_vector = reference_motion_vectors.primary;
}
// append_sub8x8_mvs( block, refList ) in the spec.
void Parser::select_best_sub_block_reference_motion_vectors(BlockContext const& block_context, BlockMotionVectorCandidates& candidates, i32 block, u8 reference_index)
void Parser::select_best_sub_block_reference_motion_vectors(BlockContext const& block_context, BlockMotionVectorCandidates& candidates, i32 block, ReferenceIndex reference_index)
{
MotionVector sub_8x8_mvs[2];
Array<MotionVector, 2> sub_8x8_mvs;
MotionVectorPair reference_motion_vectors = find_reference_motion_vectors(block_context, block_context.reference_frame_types[reference_index], block);
auto destination_index = 0;
if (block == 0) {
for (auto i = 0u; i < 2; i++)
sub_8x8_mvs[destination_index++] = reference_motion_vectors[i];
sub_8x8_mvs[destination_index++] = reference_motion_vectors.primary;
sub_8x8_mvs[destination_index++] = reference_motion_vectors.secondary;
} else if (block <= 2) {
sub_8x8_mvs[destination_index++] = block_context.sub_block_motion_vectors[0][reference_index];
} else {
@@ -1681,7 +1691,7 @@ void Parser::select_best_sub_block_reference_motion_vectors(BlockContext const&
}
for (auto n = 0u; n < 2 && destination_index < 2; n++) {
auto ref_list_vector = reference_motion_vectors[n];
auto ref_list_vector = reference_motion_vectors[static_cast<ReferenceIndex>(n)];
if (ref_list_vector != sub_8x8_mvs[0])
sub_8x8_mvs[destination_index++] = ref_list_vector;
}


@@ -114,7 +114,7 @@ private:
DecoderErrorOr<void> inter_block_mode_info(BlockContext&, FrameBlockContext above_context, FrameBlockContext left_context);
DecoderErrorOr<void> read_ref_frames(BlockContext&, FrameBlockContext above_context, FrameBlockContext left_context);
DecoderErrorOr<MotionVectorPair> get_motion_vector(BlockContext const&, BlockMotionVectorCandidates const&);
DecoderErrorOr<MotionVector> read_motion_vector(BlockContext const&, BlockMotionVectorCandidates const&, u8 reference_index);
DecoderErrorOr<MotionVector> read_motion_vector(BlockContext const&, BlockMotionVectorCandidates const&, ReferenceIndex);
DecoderErrorOr<i32> read_single_motion_vector_component(u8 component);
DecoderErrorOr<bool> residual(BlockContext&, bool has_block_above, bool has_block_left);
DecoderErrorOr<bool> tokens(BlockContext&, size_t plane, u32 x, u32 y, TXSize tx_size, u32 block_index);
@@ -123,9 +123,9 @@ private:
/* (6.5) Motion Vector Prediction */
MotionVectorPair find_reference_motion_vectors(BlockContext const&, ReferenceFrameType, i32 block);
void select_best_sub_block_reference_motion_vectors(BlockContext const&, BlockMotionVectorCandidates&, i32 block, u8 ref_list);
void select_best_sub_block_reference_motion_vectors(BlockContext const&, BlockMotionVectorCandidates&, i32 block, ReferenceIndex);
size_t get_image_index(FrameContext const&, u32 row, u32 column) const;
MotionVectorCandidate get_motion_vector_from_current_or_previous_frame(BlockContext const&, MotionVector candidate_vector, u8 ref_list, bool use_prev);
MotionVectorCandidate get_motion_vector_from_current_or_previous_frame(BlockContext const&, MotionVector candidate_vector, ReferenceIndex, bool use_prev);
void add_motion_vector_if_reference_frame_type_is_same(BlockContext const&, MotionVector candidate_vector, ReferenceFrameType ref_frame, Vector<MotionVector, 2>& list, bool use_prev);
void add_motion_vector_if_reference_frame_type_is_different(BlockContext const&, MotionVector candidate_vector, ReferenceFrameType ref_frame, Vector<MotionVector, 2>& list, bool use_prev);


@@ -202,8 +202,8 @@ ErrorOr<InterpolationFilter> TreeParser::parse_interpolation_filter(BitStream& b
// NOTE: SWITCHABLE_FILTERS is not used in the spec for this function. Therefore, the number
// was demystified by referencing the reference codec libvpx:
// https://github.com/webmproject/libvpx/blob/705bf9de8c96cfe5301451f1d7e5c90a41c64e5f/vp9/common/vp9_pred_common.h#L69
u8 left_interp = left.ref_frames[0] > ReferenceFrameType::None ? left.interpolation_filter : SWITCHABLE_FILTERS;
u8 above_interp = above.ref_frames[0] > ReferenceFrameType::None ? above.interpolation_filter : SWITCHABLE_FILTERS;
u8 left_interp = !left.is_intra_predicted() ? left.interpolation_filter : SWITCHABLE_FILTERS;
u8 above_interp = !above.is_intra_predicted() ? above.interpolation_filter : SWITCHABLE_FILTERS;
u8 context = SWITCHABLE_FILTERS;
if (above_interp == left_interp || above_interp == SWITCHABLE_FILTERS)
context = left_interp;
@@ -285,26 +285,26 @@ ErrorOr<ReferenceMode> TreeParser::parse_comp_mode(BitStream& bit_stream, Probab
u8 context;
if (above.is_available && left.is_available) {
if (above.is_single_reference() && left.is_single_reference()) {
auto is_above_fixed = above.ref_frames[0] == comp_fixed_ref;
auto is_left_fixed = left.ref_frames[0] == comp_fixed_ref;
auto is_above_fixed = above.ref_frames.primary == comp_fixed_ref;
auto is_left_fixed = left.ref_frames.primary == comp_fixed_ref;
context = is_above_fixed ^ is_left_fixed;
} else if (above.is_single_reference()) {
auto is_above_fixed = above.ref_frames[0] == comp_fixed_ref;
auto is_above_fixed = above.ref_frames.primary == comp_fixed_ref;
context = 2 + static_cast<u8>(is_above_fixed || above.is_intra_predicted());
} else if (left.is_single_reference()) {
auto is_left_fixed = left.ref_frames[0] == comp_fixed_ref;
auto is_left_fixed = left.ref_frames.primary == comp_fixed_ref;
context = 2 + static_cast<u8>(is_left_fixed || left.is_intra_predicted());
} else {
context = 4;
}
} else if (above.is_available) {
if (above.is_single_reference())
context = above.ref_frames[0] == comp_fixed_ref;
context = above.ref_frames.primary == comp_fixed_ref;
else
context = 3;
} else if (left.is_available) {
if (left.is_single_reference())
context = static_cast<u8>(left.ref_frames[0] == comp_fixed_ref);
context = static_cast<u8>(left.ref_frames.primary == comp_fixed_ref);
else
context = 3;
} else {
@@ -317,7 +317,7 @@ ErrorOr<ReferenceMode> TreeParser::parse_comp_mode(BitStream& bit_stream, Probab
return value;
}
ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTables const& probability_table, SyntaxElementCounter& counter, ReferenceFrameType comp_fixed_ref, ReferenceFramePair comp_var_ref, u8 variable_reference_index, FrameBlockContext above, FrameBlockContext left)
ErrorOr<ReferenceIndex> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTables const& probability_table, SyntaxElementCounter& counter, ReferenceFrameType comp_fixed_ref, ReferenceFramePair comp_var_ref, ReferenceIndex variable_reference_index, FrameBlockContext above, FrameBlockContext left)
{
// FIXME: Above and left contexts should be in structs.
@@ -329,24 +329,24 @@ ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTable
context = 2;
} else if (left.is_intra_predicted()) {
if (above.is_single_reference()) {
context = 1 + 2 * (above.ref_frames[0] != comp_var_ref[1]);
context = 1 + 2 * (above.ref_frames.primary != comp_var_ref.secondary);
} else {
context = 1 + 2 * (above.ref_frames[variable_reference_index] != comp_var_ref[1]);
context = 1 + 2 * (above.ref_frames[variable_reference_index] != comp_var_ref.secondary);
}
} else if (above.is_intra_predicted()) {
if (left.is_single_reference()) {
context = 1 + 2 * (left.ref_frames[0] != comp_var_ref[1]);
context = 1 + 2 * (left.ref_frames.primary != comp_var_ref.secondary);
} else {
context = 1 + 2 * (left.ref_frames[variable_reference_index] != comp_var_ref[1]);
context = 1 + 2 * (left.ref_frames[variable_reference_index] != comp_var_ref.secondary);
}
} else {
auto var_ref_above = above.is_single_reference() ? above.ref_frames[0] : above.ref_frames[variable_reference_index];
auto var_ref_left = left.is_single_reference() ? left.ref_frames[0] : left.ref_frames[variable_reference_index];
if (var_ref_above == var_ref_left && comp_var_ref[1] == var_ref_above) {
auto var_ref_above = above.is_single_reference() ? above.ref_frames.primary : above.ref_frames[variable_reference_index];
auto var_ref_left = left.is_single_reference() ? left.ref_frames.primary : left.ref_frames[variable_reference_index];
if (var_ref_above == var_ref_left && comp_var_ref.secondary == var_ref_above) {
context = 0;
} else if (left.is_single_reference() && above.is_single_reference()) {
if ((var_ref_above == comp_fixed_ref && var_ref_left == comp_var_ref[0])
|| (var_ref_left == comp_fixed_ref && var_ref_above == comp_var_ref[0])) {
if ((var_ref_above == comp_fixed_ref && var_ref_left == comp_var_ref.primary)
|| (var_ref_left == comp_fixed_ref && var_ref_above == comp_var_ref.primary)) {
context = 4;
} else if (var_ref_above == var_ref_left) {
context = 3;
@@ -356,9 +356,9 @@ ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTable
} else if (left.is_single_reference() || above.is_single_reference()) {
auto vrfc = left.is_single_reference() ? var_ref_above : var_ref_left;
auto rfs = above.is_single_reference() ? var_ref_above : var_ref_left;
if (vrfc == comp_var_ref[1] && rfs != comp_var_ref[1]) {
if (vrfc == comp_var_ref.secondary && rfs != comp_var_ref.secondary) {
context = 1;
} else if (rfs == comp_var_ref[1] && vrfc != comp_var_ref[1]) {
} else if (rfs == comp_var_ref.secondary && vrfc != comp_var_ref.secondary) {
context = 2;
} else {
context = 4;
@@ -374,9 +374,9 @@ ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTable
context = 2;
} else {
if (above.is_single_reference()) {
context = 3 * static_cast<u8>(above.ref_frames[0] != comp_var_ref[1]);
context = 3 * static_cast<u8>(above.ref_frames.primary != comp_var_ref.secondary);
} else {
context = 4 * static_cast<u8>(above.ref_frames[variable_reference_index] != comp_var_ref[1]);
context = 4 * static_cast<u8>(above.ref_frames[variable_reference_index] != comp_var_ref.secondary);
}
}
} else if (left.is_available) {
@@ -384,9 +384,9 @@ ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTable
context = 2;
} else {
if (left.is_single_reference()) {
context = 3 * static_cast<u8>(left.ref_frames[0] != comp_var_ref[1]);
context = 3 * static_cast<u8>(left.ref_frames.primary != comp_var_ref.secondary);
} else {
context = 4 * static_cast<u8>(left.ref_frames[variable_reference_index] != comp_var_ref[1]);
context = 4 * static_cast<u8>(left.ref_frames[variable_reference_index] != comp_var_ref.secondary);
}
}
} else {
@@ -395,8 +395,8 @@ ErrorOr<bool> TreeParser::parse_comp_ref(BitStream& bit_stream, ProbabilityTable
u8 probability = probability_table.comp_ref_prob()[context];
auto value = TRY(parse_tree<bool>(bit_stream, { binary_tree }, [&](u8) { return probability; }));
increment_counter(counter.m_counts_comp_ref[context][value]);
auto value = TRY(parse_tree<ReferenceIndex>(bit_stream, { binary_tree }, [&](u8) { return probability; }));
increment_counter(counter.m_counts_comp_ref[context][to_underlying(value)]);
return value;
}
@@ -411,27 +411,27 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_1(BitStream& bit_stream, Probabi
context = 2;
} else if (left.is_intra_predicted()) {
if (above.is_single_reference()) {
context = 4 * (above.ref_frames[0] == LastFrame);
context = 4 * (above.ref_frames.primary == LastFrame);
} else {
context = 1 + (above.ref_frames[0] == LastFrame || above.ref_frames[1] == LastFrame);
context = 1 + (above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame);
}
} else if (above.is_intra_predicted()) {
if (left.is_single_reference()) {
context = 4 * (left.ref_frames[0] == LastFrame);
context = 4 * (left.ref_frames.primary == LastFrame);
} else {
context = 1 + (left.ref_frames[0] == LastFrame || left.ref_frames[1] == LastFrame);
context = 1 + (left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame);
}
} else {
if (left.is_single_reference() && above.is_single_reference()) {
context = 2 * (above.ref_frames[0] == LastFrame) + 2 * (left.ref_frames[0] == LastFrame);
context = 2 * (above.ref_frames.primary == LastFrame) + 2 * (left.ref_frames.primary == LastFrame);
} else if (!left.is_single_reference() && !above.is_single_reference()) {
auto above_used_last_frame = above.ref_frames[0] == LastFrame || above.ref_frames[1] == LastFrame;
auto left_used_last_frame = left.ref_frames[0] == LastFrame || left.ref_frames[1] == LastFrame;
auto above_used_last_frame = above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame;
auto left_used_last_frame = left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame;
context = 1 + (above_used_last_frame || left_used_last_frame);
} else {
auto single_reference_type = above.is_single_reference() ? above.ref_frames[0] : left.ref_frames[0];
auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames[0] : above.ref_frames[0];
auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames[1] : above.ref_frames[1];
auto single_reference_type = above.is_single_reference() ? above.ref_frames.primary : left.ref_frames.primary;
auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames.primary : above.ref_frames.primary;
auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames.secondary : above.ref_frames.secondary;
context = compound_reference_a_type == LastFrame || compound_reference_b_type == LastFrame;
if (single_reference_type == LastFrame)
context += 3;
@@ -442,9 +442,9 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_1(BitStream& bit_stream, Probabi
context = 2;
} else {
if (above.is_single_reference()) {
context = 4 * (above.ref_frames[0] == LastFrame);
context = 4 * (above.ref_frames.primary == LastFrame);
} else {
context = 1 + (above.ref_frames[0] == LastFrame || above.ref_frames[1] == LastFrame);
context = 1 + (above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame);
}
}
} else if (left.is_available) {
@@ -452,9 +452,9 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_1(BitStream& bit_stream, Probabi
context = 2;
} else {
if (left.is_single_reference()) {
context = 4 * (left.ref_frames[0] == LastFrame);
context = 4 * (left.ref_frames.primary == LastFrame);
} else {
context = 1 + (left.ref_frames[0] == LastFrame || left.ref_frames[1] == LastFrame);
context = 1 + (left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame);
}
}
} else {
@@ -478,47 +478,47 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_2(BitStream& bit_stream, Probabi
context = 2;
} else if (left.is_intra_predicted()) {
if (above.is_single_reference()) {
if (above.ref_frames[0] == LastFrame) {
if (above.ref_frames.primary == LastFrame) {
context = 3;
} else {
context = 4 * (above.ref_frames[0] == GoldenFrame);
context = 4 * (above.ref_frames.primary == GoldenFrame);
}
} else {
context = 1 + 2 * (above.ref_frames[0] == GoldenFrame || above.ref_frames[1] == GoldenFrame);
context = 1 + 2 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
}
} else if (above.is_intra_predicted()) {
if (left.is_single_reference()) {
if (left.ref_frames[0] == LastFrame) {
if (left.ref_frames.primary == LastFrame) {
context = 3;
} else {
context = 4 * (left.ref_frames[0] == GoldenFrame);
context = 4 * (left.ref_frames.primary == GoldenFrame);
}
} else {
context = 1 + 2 * (left.ref_frames[0] == GoldenFrame || left.ref_frames[1] == GoldenFrame);
context = 1 + 2 * (left.ref_frames.primary == GoldenFrame || left.ref_frames.secondary == GoldenFrame);
}
} else {
if (left.is_single_reference() && above.is_single_reference()) {
auto above_last = above.ref_frames[0] == LastFrame;
auto left_last = left.ref_frames[0] == LastFrame;
auto above_last = above.ref_frames.primary == LastFrame;
auto left_last = left.ref_frames.primary == LastFrame;
if (above_last && left_last) {
context = 3;
} else if (above_last) {
context = 4 * (left.ref_frames[0] == GoldenFrame);
context = 4 * (left.ref_frames.primary == GoldenFrame);
} else if (left_last) {
context = 4 * (above.ref_frames[0] == GoldenFrame);
context = 4 * (above.ref_frames.primary == GoldenFrame);
} else {
context = 2 * (above.ref_frames[0] == GoldenFrame) + 2 * (left.ref_frames[0] == GoldenFrame);
context = 2 * (above.ref_frames.primary == GoldenFrame) + 2 * (left.ref_frames.primary == GoldenFrame);
}
} else if (!left.is_single_reference() && !above.is_single_reference()) {
if (above.ref_frames[0] == left.ref_frames[0] && above.ref_frames[1] == left.ref_frames[1]) {
context = 3 * (above.ref_frames[0] == GoldenFrame || above.ref_frames[1] == GoldenFrame);
if (above.ref_frames.primary == left.ref_frames.primary && above.ref_frames.secondary == left.ref_frames.secondary) {
context = 3 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
} else {
context = 2;
}
} else {
auto single_reference_type = above.is_single_reference() ? above.ref_frames[0] : left.ref_frames[0];
auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames[0] : above.ref_frames[0];
auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames[1] : above.ref_frames[1];
auto single_reference_type = above.is_single_reference() ? above.ref_frames.primary : left.ref_frames.primary;
auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames.primary : above.ref_frames.primary;
auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames.secondary : above.ref_frames.secondary;
context = compound_reference_a_type == GoldenFrame || compound_reference_b_type == GoldenFrame;
if (single_reference_type == GoldenFrame) {
context += 3;
@@ -528,20 +528,20 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_2(BitStream& bit_stream, Probabi
}
}
} else if (above.is_available) {
if (above.is_intra_predicted() || (above.ref_frames[0] == LastFrame && above.is_single_reference())) {
if (above.is_intra_predicted() || (above.ref_frames.primary == LastFrame && above.is_single_reference())) {
context = 2;
} else if (above.is_single_reference()) {
context = 4 * (above.ref_frames[0] == GoldenFrame);
context = 4 * (above.ref_frames.primary == GoldenFrame);
} else {
context = 3 * (above.ref_frames[0] == GoldenFrame || above.ref_frames[1] == GoldenFrame);
context = 3 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
}
} else if (left.is_available) {
if (left.is_intra_predicted() || (left.ref_frames[0] == LastFrame && left.is_single_reference())) {
if (left.is_intra_predicted() || (left.ref_frames.primary == LastFrame && left.is_single_reference())) {
context = 2;
} else if (left.is_single_reference()) {
context = 4 * (left.ref_frames[0] == GoldenFrame);
context = 4 * (left.ref_frames.primary == GoldenFrame);
} else {
context = 3 * (left.ref_frames[0] == GoldenFrame || left.ref_frames[1] == GoldenFrame);
context = 3 * (left.ref_frames.primary == GoldenFrame || left.ref_frames.secondary == GoldenFrame);
}
} else {
context = 2;


@@ -62,7 +62,7 @@ public:
static ErrorOr<TXSize> parse_tx_size(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, TXSize max_tx_size, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<bool> parse_block_is_inter_predicted(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<ReferenceMode> parse_comp_mode(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, ReferenceFrameType comp_fixed_ref, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<bool> parse_comp_ref(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, ReferenceFrameType comp_fixed_ref, ReferenceFramePair comp_var_ref, u8 variable_reference_index, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<ReferenceIndex> parse_comp_ref(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, ReferenceFrameType comp_fixed_ref, ReferenceFramePair comp_var_ref, ReferenceIndex variable_reference_index, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<bool> parse_single_ref_part_1(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, FrameBlockContext above, FrameBlockContext left);
static ErrorOr<bool> parse_single_ref_part_2(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, FrameBlockContext above, FrameBlockContext left);