LibDSP+Piano: Convert DSP APIs to accept entire sample ranges
This mainly has performance benefits: we only need to call into all processors once for every audio buffer segment. It requires adjusting quite a lot of logic in most processors and in Track, as we now have to consider a larger collection of notes and samples at each step. There are some cautionary TODOs in the currently unused LibDSP tracks, because they don't do things properly yet.
Parent: 4d65607649
Commit: 9035d9e845

9 changed files with 123 additions and 75 deletions
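
The core of the change: a processor no longer returns a single Sample per call, it fills a caller-provided Signal wrapping a whole FixedArray of samples. A rough before/after sketch of the call shape, based on the hunks below (`processor` and `buffer_size` are assumed to exist and are not from this commit):

    // Before: pull one sample per call, returned by value inside a Signal.
    Sample out_sample = processor->process(input_signal).get<Sample>();

    // After: pre-allocate an output buffer once and let the processor fill it in place.
    auto output_signal = Signal { FixedArray<Sample>::must_create_but_fixme_should_propagate_errors(buffer_size) };
    processor->process(input_signal, output_signal);
    auto const& output_samples = output_signal.get<FixedArray<Sample>>();
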
@@ -45,8 +45,11 @@ void Track::fill_sample(Sample& sample)
         m_keyboard_notes[i] = {};
     }
 
-    auto synthesized_sample = m_synth->process(playing_notes).get<LibDSP::Sample>();
-    auto delayed_sample = m_delay->process(synthesized_sample).get<LibDSP::Sample>();
+    auto synthesized_sample = LibDSP::Signal { FixedArray<Audio::Sample>::must_create_but_fixme_should_propagate_errors(1) };
+    m_synth->process(playing_notes, synthesized_sample);
+    auto delayed_signal = LibDSP::Signal { FixedArray<Audio::Sample>::must_create_but_fixme_should_propagate_errors(1) };
+    m_delay->process(synthesized_sample, delayed_signal);
+    auto delayed_sample = delayed_signal.get<FixedArray<Audio::Sample>>()[0];
 
     // HACK: Convert to old Piano range: 16-bit int
     delayed_sample *= NumericLimits<i16>::max();

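Piano still renders sample by sample, so this shim wraps each step in one-element FixedArrays; `must_create_but_fixme_should_propagate_errors()` is the non-propagating shortcut. For reference, the error-propagating form (used later in LibDSP's own Track) would look like this, assuming an ErrorOr-returning caller:

    // Same one-element buffer, but with error propagation instead of the must_create shortcut.
    auto synthesized_sample = LibDSP::Signal { TRY(FixedArray<Audio::Sample>::try_create(1)) };
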
@@ -5,6 +5,7 @@
  */
 
 #include "Effects.h"
+#include <AK/FixedArray.h>
 #include <math.h>
 
 namespace LibDSP::Effects {

@@ -32,13 +33,17 @@ void Delay::handle_delay_time_change()
     }
 }
 
-Signal Delay::process_impl(Signal const& input_signal)
+void Delay::process_impl(Signal const& input_signal, Signal& output_signal)
 {
+    // FIXME: This is allocating and needs to happen on a different thread.
     handle_delay_time_change();
 
-    Sample const& in = input_signal.get<Sample>();
-    Sample out;
-    out += in.log_multiplied(static_cast<double>(m_dry_gain));
+    auto const& samples = input_signal.get<FixedArray<Sample>>();
+    auto& output = output_signal.get<FixedArray<Sample>>();
+    for (size_t i = 0; i < output.size(); ++i) {
+        auto& out = output[i];
+        auto const& sample = samples[i];
+        out += sample.log_multiplied(static_cast<double>(m_dry_gain));
         out += m_delay_buffer[m_delay_index].log_multiplied(m_delay_decay);
 
         // This is also convenient for disabling the delay effect by setting the buffer size to 0

@@ -47,8 +52,7 @@ Signal Delay::process_impl(Signal const& input_signal)
 
         if (m_delay_index >= m_delay_buffer.size())
             m_delay_index = 0;
-
-    return Signal(out);
+    }
 }
 
 Mastering::Mastering(NonnullRefPtr<Transport> transport)

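The loop body above is a per-sample circular delay-line update: mix the dry input with the decayed sample at the read/write index, then advance and wrap the index. A standalone sketch of the same pattern, simplified to linear gains (log_multiplied() is LibDSP-specific) and assuming the line is fed with the wet output, as in the unchanged code between these two hunks:

    #include <stddef.h>

    // Minimal circular delay line; `line` must point at `line_size` pre-allocated floats.
    static float delay_step(float in, float* line, size_t line_size, size_t& index, float dry_gain, float decay)
    {
        float out = in * dry_gain;
        if (line_size == 0)
            return out;             // a zero-sized buffer disables the effect
        out += line[index] * decay; // mix in the delayed, decayed signal
        line[index] = out;          // feed the wet signal back into the line
        if (++index >= line_size)
            index = 0;              // wrap around
        return out;
    }
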
@@ -56,7 +60,7 @@ Mastering::Mastering(NonnullRefPtr<Transport> transport)
 {
 }
 
-Signal Mastering::process_impl([[maybe_unused]] Signal const& input_signal)
+void Mastering::process_impl([[maybe_unused]] Signal const& input_signal, [[maybe_unused]] Signal& output_signal)
 {
     TODO();
 }

@@ -20,7 +20,7 @@ public:
     Delay(NonnullRefPtr<Transport>);
 
 private:
-    virtual Signal process_impl(Signal const&) override;
+    virtual void process_impl(Signal const&, Signal&) override;
     void handle_delay_time_change();
 
     ProcessorRangeParameter m_delay_decay;

@@ -38,7 +38,7 @@ public:
     Mastering(NonnullRefPtr<Transport>);
 
 private:
-    virtual Signal process_impl(Signal const&) override;
+    virtual void process_impl(Signal const&, Signal&) override;
 };
 
 }

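Every subclass override now has the two-argument, void-returning shape. A hypothetical new effect would be declared the same way (the class name `Gain`, its parameter, and the EffectProcessor base that Delay presumably uses are assumptions for illustration, not part of this commit):

    // Hypothetical effect declaration following the new override.
    class Gain final : public EffectProcessor {
    public:
        Gain(NonnullRefPtr<Transport>);

    private:
        virtual void process_impl(Signal const&, Signal&) override;

        ProcessorRangeParameter m_gain;
    };
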
@@ -6,7 +6,9 @@
 
 #pragma once
 
+#include <AK/FixedArray.h>
 #include <AK/HashMap.h>
+#include <AK/Noncopyable.h>
 #include <AK/Types.h>
 #include <AK/Variant.h>
 #include <AK/Vector.h>

@@ -66,13 +68,29 @@ enum class SignalType : u8 {
     Note
 };
 
-using RollNotes = OrderedHashMap<u8, RollNote>;
+// Perfect hashing for note (MIDI) values. This just uses the note value as the hash itself.
+class PerfectNoteHashTraits : Traits<u8> {
+public:
+    static constexpr bool equals(u8 const& a, u8 const& b) { return a == b; }
+    static constexpr unsigned hash(u8 value)
+    {
+        return static_cast<unsigned>(value);
+    }
+};
 
-struct Signal : public Variant<Sample, RollNotes> {
+using RollNotes = OrderedHashMap<u8, RollNote, PerfectNoteHashTraits>;
+
+struct Signal : public Variant<FixedArray<Sample>, RollNotes> {
     using Variant::Variant;
+    AK_MAKE_NONCOPYABLE(Signal);
+
+public:
+    Signal& operator=(Signal&&) = default;
+    Signal(Signal&&) = default;
+
     ALWAYS_INLINE SignalType type() const
     {
-        if (has<Sample>())
+        if (has<FixedArray<Sample>>())
             return SignalType::Sample;
         if (has<RollNotes>())
             return SignalType::Note;

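Signal now owns a FixedArray<Sample> instead of a single Sample and is explicitly move-only, and RollNotes keys (MIDI note numbers, 0–127) hash to themselves, so lookups never collide. A small usage sketch, assuming the LibDSP namespace and AK headers; the buffer size and note value are arbitrary:

    // Constructing and inspecting the two Signal alternatives.
    auto sample_signal = Signal { FixedArray<Sample>::must_create_but_fixme_should_propagate_errors(512) };
    VERIFY(sample_signal.type() == SignalType::Sample);

    RollNotes notes;
    notes.set(69, RollNote {}); // key 69 (A4) hashes to 69 itself; note contents omitted
    Signal note_signal { move(notes) };
    VERIFY(note_signal.type() == SignalType::Note);
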
@@ -24,12 +24,11 @@ class Processor : public RefCounted<Processor> {
 
 public:
     virtual ~Processor() = default;
-    Signal process(Signal const& input_signal)
+    void process(Signal const& input_signal, Signal& output_signal)
     {
         VERIFY(input_signal.type() == m_input_type);
-        auto processed = process_impl(input_signal);
-        VERIFY(processed.type() == m_output_type);
-        return processed;
+        process_impl(input_signal, output_signal);
+        VERIFY(output_signal.type() == m_output_type);
     }
     SignalType input_type() const { return m_input_type; }
     SignalType output_type() const { return m_output_type; }

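process() now verifies both ends of the contract: the input must already match input_type(), and the caller-provided output must hold the alternative matching output_type(). A sketch of the caller's side, mirroring what Track does further down (`processor`, `input` and `buffer_size` are assumed to exist):

    // Pick a pre-allocated output whose alternative matches the processor's output type.
    Signal note_output { RollNotes {} };
    Signal sample_output { FixedArray<Sample>::must_create_but_fixme_should_propagate_errors(buffer_size) };
    auto& output = processor->output_type() == SignalType::Note ? note_output : sample_output;
    processor->process(input, output); // fills `output` in place and VERIFYs its type
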
@@ -47,7 +46,7 @@ protected:
         , m_transport(move(transport))
     {
     }
-    virtual Signal process_impl(Signal const& input_signal) = 0;
+    virtual void process_impl(Signal const& input_signal, Signal& output_signal) = 0;
 
     NonnullRefPtr<Transport> m_transport;
     Vector<ProcessorParameter&> m_parameters;

@@ -31,11 +31,15 @@ Classic::Classic(NonnullRefPtr<Transport> transport)
     m_parameters.append(m_release);
 }
 
-Signal Classic::process_impl(Signal const& input_signal)
+void Classic::process_impl(Signal const& input_signal, [[maybe_unused]] Signal& output_signal)
 {
-    auto& in = input_signal.get<RollNotes>();
+    auto const& in = input_signal.get<RollNotes>();
+    auto& output_samples = output_signal.get<FixedArray<Sample>>();
 
-    Sample out;
+    // Do this for every time step and set the signal accordingly.
+    for (size_t sample_index = 0; sample_index < output_samples.size(); ++sample_index) {
+        Sample& out = output_samples[sample_index];
+        u32 sample_time = m_transport->time() + sample_index;
 
         SinglyLinkedList<PitchedEnvelope> playing_envelopes;
 

@@ -46,7 +50,7 @@ Signal Classic::process_impl(Signal const& input_signal)
             m_playing_notes.set(i, maybe_note.value());
 
         if (m_playing_notes.contains(i)) {
-            Envelope note_envelope = m_playing_notes.get(i)->to_envelope(m_transport->time(), m_attack * m_transport->ms_sample_rate(), m_decay * m_transport->ms_sample_rate(), m_release * m_transport->ms_sample_rate());
+            Envelope note_envelope = m_playing_notes.get(i)->to_envelope(sample_time, m_attack * m_transport->ms_sample_rate(), m_decay * m_transport->ms_sample_rate(), m_release * m_transport->ms_sample_rate());
             if (!note_envelope.is_active()) {
                 m_playing_notes.remove(i);
                 continue;

@@ -61,8 +65,7 @@ Signal Classic::process_impl(Signal const& input_signal)
             double wave = wave_position(envelope.note);
             out += volume * wave;
         }
-
-    return out;
+    }
 }
 
 // Linear ADSR envelope with no peak adjustment.

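Since one call now covers a whole buffer, the synthesizer can no longer evaluate every envelope at the transport time; each output sample gets its own timestamp offset into the batch, so a note that starts or ends mid-buffer takes effect on exactly the right sample. The reduced shape of that idea (envelope math elided):

    // Per-sample timestamps inside one batched process call.
    for (size_t sample_index = 0; sample_index < output_samples.size(); ++sample_index) {
        u32 sample_time = m_transport->time() + sample_index; // transport time = time of the first sample
        // ...evaluate note envelopes at sample_time and accumulate into output_samples[sample_index]...
    }
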
@@ -47,7 +47,7 @@ public:
     Waveform wave() const { return m_waveform.value(); }
 
 private:
-    virtual Signal process_impl(Signal const&) override;
+    virtual void process_impl(Signal const&, Signal&) override;
 
     double volume_from_envelope(Envelope const&) const;
     double wave_position(u8 note);

@@ -4,13 +4,16 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
+#include <AK/FixedArray.h>
+#include <AK/NoAllocationGuard.h>
 #include <AK/Optional.h>
+#include <AK/StdLibExtras.h>
+#include <AK/TypedTransfer.h>
 #include <AK/Types.h>
+#include <LibDSP/Music.h>
 #include <LibDSP/Processor.h>
 #include <LibDSP/Track.h>
 
-using namespace std;
-
 namespace LibDSP {
 
 bool Track::add_processor(NonnullRefPtr<Processor> new_processor)

@@ -48,20 +51,43 @@ bool NoteTrack::check_processor_chain_valid() const
     return check_processor_chain_valid_with_initial_type(SignalType::Note);
 }
 
-Sample Track::current_signal()
+ErrorOr<void> Track::resize_internal_buffers_to(size_t buffer_size)
 {
+    m_secondary_sample_buffer = TRY(FixedArray<Sample>::try_create(buffer_size));
+    return {};
+}
+
+void Track::current_signal(FixedArray<Sample>& output_signal)
+{
+    // This is real-time code. We must NEVER EVER EVER allocate.
+    NoAllocationGuard guard;
+    VERIFY(output_signal.size() == m_secondary_sample_buffer.get<FixedArray<Sample>>().size());
+
     compute_current_clips_signal();
-
-    Optional<Signal> the_signal;
+    Signal* source_signal = &m_current_signal;
+    // This provides an audio buffer of the right size. It is not allocated here, but whenever we are informed about a buffer size change.
+    Signal* target_signal = &m_secondary_sample_buffer;
+
     for (auto& processor : m_processor_chain) {
-        the_signal = processor.process(the_signal.value_or(m_current_signal));
+        // Depending on what the processor needs to have as output, we need to place either a pre-allocated note hash map or a pre-allocated sample buffer in the target signal.
+        if (processor.output_type() == SignalType::Note)
+            target_signal = &m_secondary_note_buffer;
+        else
+            target_signal = &m_secondary_sample_buffer;
+        processor.process(*source_signal, *target_signal);
+        swap(source_signal, target_signal);
     }
-    VERIFY(the_signal.has_value() && the_signal->type() == SignalType::Sample);
-    return the_signal->get<Sample>();
+    VERIFY(source_signal->type() == SignalType::Sample);
+    VERIFY(output_signal.size() == source_signal->get<FixedArray<Sample>>().size());
+    // This is one final unavoidable memcopy. Otherwise we need to special-case the last processor or
+    AK::TypedTransfer<Sample>::copy(output_signal.data(), source_signal->get<FixedArray<Sample>>().data(), output_signal.size());
 }
 
 void NoteTrack::compute_current_clips_signal()
 {
+    // Consider the entire time duration.
+    TODO();
+
     u32 time = m_transport->time();
     // Find the currently playing clip.
     NoteClip* playing_clip = nullptr;

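current_signal() is now real-time safe: NoAllocationGuard forbids allocation, all buffers were created up front, and the chain ping-pongs between the current signal and the pre-allocated secondary buffers, swapping source and target after each processor; one TypedTransfer copy at the end moves the result into the caller's buffer. The same double-buffering pattern in isolation, as a generic sketch in plain C++ rather than the LibDSP types:

    #include <algorithm>
    #include <functional>
    #include <vector>

    using Buffer = std::vector<float>;
    using Stage = std::function<void(Buffer const&, Buffer&)>;

    // All three buffers are pre-sized by the caller; nothing in this function allocates.
    void run_chain(std::vector<Stage> const& chain, Buffer& current, Buffer& scratch, Buffer& out)
    {
        Buffer* source = &current;
        Buffer* target = &scratch;
        for (auto const& stage : chain) {
            stage(*source, *target);   // each stage reads *source and writes *target in place
            std::swap(source, target); // the stage's output becomes the next stage's input
        }
        // One final, unavoidable copy into the caller's buffer.
        std::copy(source->begin(), source->end(), out.begin());
    }
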
@@ -91,22 +117,8 @@ void NoteTrack::compute_current_clips_signal()
 
 void AudioTrack::compute_current_clips_signal()
 {
-    // Find the currently playing clip.
-    u32 time = m_transport->time();
-    AudioClip* playing_clip = nullptr;
-    for (auto& clip : m_clips) {
-        if (clip.start() <= time && clip.end() >= time) {
-            playing_clip = &clip;
-            break;
-        }
-    }
-    if (playing_clip == nullptr) {
-        m_current_signal = Signal(static_cast<Sample const&>(SAMPLE_OFF));
-    }
-
-    // Index into the clip's samples.
-    u32 effective_sample = time - playing_clip->start();
-    m_current_signal = Signal(playing_clip->sample_at(effective_sample));
+    // This is quite involved as we need to look at multiple clips and take looping into account.
+    TODO();
 }
 
 }

@@ -23,8 +23,11 @@ public:
     virtual bool check_processor_chain_valid() const = 0;
     bool add_processor(NonnullRefPtr<Processor> new_processor);
 
-    // Creates the current signal of the track by processing current note or audio data through the processing chain
-    Sample current_signal();
+    // Creates the current signal of the track by processing current note or audio data through the processing chain.
+    void current_signal(FixedArray<Sample>& output_signal);
+
+    // We are informed of an audio buffer size change. This happens off-audio-thread so we can allocate.
+    ErrorOr<void> resize_internal_buffers_to(size_t buffer_size);
 
     NonnullRefPtrVector<Processor> const& processor_chain() const { return m_processor_chain; }
     NonnullRefPtr<Transport const> transport() const { return m_transport; }

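The intended split: resize_internal_buffers_to() runs off the audio thread whenever the host's buffer size changes (so ErrorOr/TRY allocation is fine there), and current_signal() then runs once per audio callback with a buffer of exactly that size. A host-side sketch with hypothetical names (`track`, `new_buffer_size`), inside an ErrorOr-returning function:

    // Off the audio thread, when the buffer size changes:
    TRY(track.resize_internal_buffers_to(new_buffer_size));
    auto playback_buffer = TRY(FixedArray<Sample>::try_create(new_buffer_size));

    // On the audio thread, once per callback:
    track.current_signal(playback_buffer); // size must match resize_internal_buffers_to()
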
@@ -42,7 +45,13 @@ protected:
     NonnullRefPtrVector<Processor> m_processor_chain;
     NonnullRefPtr<Transport> m_transport;
     // The current signal is stored here, to prevent unnecessary reallocation.
-    Signal m_current_signal { Audio::Sample {} };
+    Signal m_current_signal { FixedArray<Sample> {} };
+
+    // These are so that we don't have to allocate a secondary buffer in current_signal().
+    // A sample buffer possibly used by the processor chain.
+    Signal m_secondary_sample_buffer { FixedArray<Sample> {} };
+    // A note buffer possibly used by the processor chain.
+    Signal m_secondary_note_buffer { RollNotes {} };
 };
 
 class NoteTrack final : public Track {
