LibVideo: Implement Matroska Cues for faster keyframe lookup
This implements the fastest seeking mode available for tracks with cues, using an array of cue points for each track. It approximates an index into that array based on the seek timestamp and then finds the latest cue point at or before that timestamp. The approximation assumes that cues are placed at a regular interval, which I don't believe is always the case, but it should at least be faster than iterating the whole set of cue points each time.

Cues are stored per track, but most videos will only have cue points for the video track(s) that are present. For now, this assumes that it should only seek based on the cue points for the selected track. To seek audio in a video file, we should copy the seeked iterator over to the audio track's iterator after seeking is complete; the iterator will then skip to the next audio block.
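As a rough sketch of the lookup described above: the example below is plain standard C++, not LibVideo's actual types or API. CuePoint, its fields, and find_cue_for_timestamp are simplified stand-ins invented for illustration, and the cue values in main() are placeholders. It guesses an index as if the cue points were evenly spaced in time, then corrects the guess by scanning to the latest cue point at or before the target timestamp.

#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

// Simplified stand-in for a per-track cue point: a keyframe timestamp and the
// byte position of the cluster that contains it.
struct CuePoint {
    uint64_t timestamp_ms { 0 };
    uint64_t cluster_position { 0 };
};

// Returns the index of the last cue point whose timestamp is <= target_ms,
// or no value if every cue point lies after the target.
static std::optional<size_t> find_cue_for_timestamp(std::vector<CuePoint> const& cues, uint64_t target_ms)
{
    if (cues.empty() || target_ms < cues.front().timestamp_ms)
        return std::nullopt;

    // Guess an index as if the cue points were spaced on a regular interval.
    uint64_t first = cues.front().timestamp_ms;
    uint64_t last = cues.back().timestamp_ms;
    size_t index = cues.size() - 1;
    if (target_ms < last && last > first)
        index = static_cast<size_t>(((target_ms - first) * (cues.size() - 1)) / (last - first));

    // Cue spacing is rarely perfectly regular, so correct the guess by scanning
    // to the latest cue point at or before the target timestamp.
    while (index + 1 < cues.size() && cues[index + 1].timestamp_ms <= target_ms)
        ++index;
    while (index > 0 && cues[index].timestamp_ms > target_ms)
        --index;

    return index;
}

int main()
{
    // Irregularly spaced cue points (placeholder values for illustration only).
    std::vector<CuePoint> cues { { 0, 100 }, { 2000, 5000 }, { 4100, 9800 }, { 9000, 20000 } };

    if (auto index = find_cue_for_timestamp(cues, 4500))
        std::printf("seek to cluster at byte offset %llu\n",
            static_cast<unsigned long long>(cues[*index].cluster_position));
    return 0;
}

The two correction loops are what keep the lookup valid when the regular-interval assumption does not hold; in the worst case the scan degrades toward linear, but a roughly even cue spacing keeps the guess within a few entries of the right index.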
parent 56d8b96c78
commit f6830eaf73
5 changed files with 288 additions and 2 deletions
@@ -24,10 +24,12 @@ ErrorOr<int> serenity_main(Main::Arguments arguments)
 {
     StringView filename;
     bool blocks = false;
+    bool cues = false;
     u64 track_number = 0;
 
     Core::ArgsParser args_parser;
     args_parser.add_option(blocks, "Print blocks for each track.", "blocks", 'b');
+    args_parser.add_option(cues, "Print cue points for each track.", "cues", 'c');
     args_parser.add_option<u64>(track_number, "Specify a track number to print info for, omit to print all of them.", "track", 't', "tracknumber");
     args_parser.add_positional_argument(filename, "The video file to display.", "filename", Core::ArgsParser::Required::Yes);
     args_parser.parse(arguments);
@@ -61,6 +63,28 @@ ErrorOr<int> serenity_main(Main::Arguments arguments)
             outln("\t\tAudio has {} channels with a bit depth of {}", audio_track.channels, audio_track.bit_depth);
         }
 
+        if (cues) {
+            auto const& cue_points = TRY(reader.cue_points_for_track(track_entry.track_number()));
+
+            if (cue_points.has_value()) {
+                outln("\tCues points:");
+
+                for (auto const& cue_point : cue_points.value()) {
+                    outln("\t\tCue point at {}ms:", cue_point.timestamp().to_milliseconds());
+                    auto const& track_position = cue_point.position_for_track(track_entry.track_number());
+
+                    if (!track_position.has_value()) {
+                        outln("\t\t\tCue point has no positions for this track, this should not happen");
+                        continue;
+                    }
+                    outln("\t\t\tCluster position {}", track_position->cluster_position());
+                    outln("\t\t\tBlock offset {}", track_position->block_offset());
+                }
+            } else {
+                outln("\tNo cue points exist for this track");
+            }
+        }
+
         if (blocks) {
             outln("\tBlocks:");
             auto iterator = TRY(reader.create_sample_iterator(track_entry.track_number()));