1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-28 13:37:45 +00:00

Audio: Change how volume works

Across the entire audio system, audio now works in 0-1 terms instead of
0-100 as before. Therefore, volume is now a double instead of an int.
The master volume of the AudioServer changes smoothly through a
FadingProperty, preventing clicks. Finally, volume computations are done
with logarithmic scaling, which is more natural for the human ear.

Note that this could be 4-5 different commits, but as they change each
other's code all the time, it makes no sense to split them up.
This commit is contained in:
kleines Filmröllchen 2021-08-27 23:47:09 +02:00 committed by Andreas Kling
parent 2909c3a931
commit 152ec28da0
14 changed files with 190 additions and 45 deletions

View file

@ -4,5 +4,5 @@ endpoint AudioClient
{
finished_playing_buffer(i32 buffer_id) =|
muted_state_changed(bool muted) =|
main_mix_volume_changed(i32 volume) =|
main_mix_volume_changed(double volume) =|
}

View file

@ -5,8 +5,8 @@ endpoint AudioServer
// Mixer functions
set_muted(bool muted) => ()
get_muted() => (bool muted)
get_main_mix_volume() => (i32 volume)
set_main_mix_volume(i32 volume) => ()
get_main_mix_volume() => (double volume)
set_main_mix_volume(double volume) => ()
// Audio device
set_sample_rate(u16 sample_rate) => ()

View file

@ -48,7 +48,7 @@ void ClientConnection::did_change_muted_state(Badge<Mixer>, bool muted)
async_muted_state_changed(muted);
}
void ClientConnection::did_change_main_mix_volume(Badge<Mixer>, int volume)
// Server -> client notification: the AudioServer's main mix volume changed.
// Volume is a double (per this commit, 0-1 terms instead of the old int 0-100);
// forwards asynchronously so the mixer thread never blocks on a client.
void ClientConnection::did_change_main_mix_volume(Badge<Mixer>, double volume)
{
async_main_mix_volume_changed(volume);
}
@ -58,7 +58,7 @@ Messages::AudioServer::GetMainMixVolumeResponse ClientConnection::get_main_mix_v
return m_mixer.main_volume();
}
void ClientConnection::set_main_mix_volume(i32 volume)
// IPC handler: client requests a new master volume (double; Mixer::set_main_volume
// clamps it to [0, 2] — see Mixer.cpp). Delegates straight to the mixer, which
// also persists the value and notifies all clients.
void ClientConnection::set_main_mix_volume(double volume)
{
m_mixer.set_main_volume(volume);
}

View file

@ -28,7 +28,7 @@ public:
void did_finish_playing_buffer(Badge<BufferQueue>, int buffer_id);
void did_change_muted_state(Badge<Mixer>, bool muted);
void did_change_main_mix_volume(Badge<Mixer>, int volume);
void did_change_main_mix_volume(Badge<Mixer>, double volume);
virtual void die() override;
@ -36,7 +36,7 @@ public:
private:
virtual Messages::AudioServer::GetMainMixVolumeResponse get_main_mix_volume() override;
virtual void set_main_mix_volume(i32) override;
virtual void set_main_mix_volume(double) override;
virtual Messages::AudioServer::EnqueueBufferResponse enqueue_buffer(Core::AnonymousBuffer const&, i32, int) override;
virtual Messages::AudioServer::GetRemainingSamplesResponse get_remaining_samples() override;
virtual Messages::AudioServer::GetPlayedSamplesResponse get_played_samples() override;

View file

@ -0,0 +1,85 @@
/*
* Copyright (c) 2021, kleines Filmröllchen <malu.bertsch@gmail.com>.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "Mixer.h"
#include <compare>
namespace AudioServer {
// This is in buffer counts.
// As each buffer is approx 1/40 of a second, this means about 1/4 of a second of fade time.
constexpr int DEFAULT_FADE_TIME = 10;
// A property of an audio system that needs to fade briefly whenever changed.
template<typename T>
class FadingProperty {
public:
FadingProperty(T const value)
: FadingProperty(value, DEFAULT_FADE_TIME)
{
}
FadingProperty(T const value, int const fade_time)
: m_old_value(value)
, m_new_value(move(value))
, m_fade_time(fade_time)
{
}
virtual ~FadingProperty()
{
m_old_value.~T();
m_new_value.~T();
}
FadingProperty<T>& operator=(T const& new_value)
{
// The origin of the fade is wherever we're right now.
m_old_value = static_cast<T>(*this);
m_new_value = new_value;
m_current_fade = 0;
return *this;
}
FadingProperty<T>& operator=(FadingProperty<T> const&) = delete;
operator T() const
{
if (!is_fading())
return m_new_value;
return m_old_value * (1 - m_current_fade) + m_new_value * (m_current_fade);
}
auto operator<=>(FadingProperty<T> const& other) const
{
return static_cast<T>(this) <=> static_cast<T>(other);
}
auto operator<=>(T const& other) const
{
return static_cast<T>(*this) <=> other;
}
void advance_time()
{
m_current_fade += 1.0 / static_cast<double>(m_fade_time);
m_current_fade = clamp(m_current_fade, 0.0, 1.0);
}
bool is_fading() const
{
return m_current_fade < 1;
}
T target() const { return m_new_value; }
private:
T m_old_value {};
T m_new_value {};
double m_current_fade { 0 };
int const m_fade_time;
};
}

View file

@ -40,7 +40,7 @@ Mixer::Mixer(NonnullRefPtr<Core::ConfigFile> config)
pthread_cond_init(&m_pending_cond, nullptr);
m_muted = m_config->read_bool_entry("Master", "Mute", false);
m_main_volume = m_config->read_num_entry("Master", "Volume", 100);
m_main_volume = static_cast<double>(m_config->read_num_entry("Master", "Volume", 100)) / 100.0;
m_sound_thread->start();
}
@ -78,18 +78,23 @@ void Mixer::mix()
Audio::Frame mixed_buffer[1024];
auto mixed_buffer_length = (int)(sizeof(mixed_buffer) / sizeof(Audio::Frame));
m_main_volume.advance_time();
int active_queues = 0;
// Mix the buffers together into the output
for (auto& queue : active_mix_queues) {
if (!queue->client()) {
queue->clear();
continue;
}
++active_queues;
for (int i = 0; i < mixed_buffer_length; ++i) {
auto& mixed_sample = mixed_buffer[i];
Audio::Frame sample;
if (!queue->get_next_sample(sample))
break;
sample.log_multiply(SAMPLE_HEADROOM);
mixed_sample += sample;
}
}
@ -103,7 +108,11 @@ void Mixer::mix()
for (int i = 0; i < mixed_buffer_length; ++i) {
auto& mixed_sample = mixed_buffer[i];
mixed_sample.scale(m_main_volume);
// Even though it's not realistic, the user expects no sound at 0%.
if (m_main_volume < 0.01)
mixed_sample = { 0 };
else
mixed_sample.log_multiply(m_main_volume);
mixed_sample.clip();
LittleEndian<i16> out_sample;
@ -121,20 +130,20 @@ void Mixer::mix()
}
}
void Mixer::set_main_volume(int volume)
void Mixer::set_main_volume(double volume)
{
if (volume < 0)
m_main_volume = 0;
else if (volume > 200)
m_main_volume = 200;
else if (volume > 2)
m_main_volume = 2;
else
m_main_volume = volume;
m_config->write_num_entry("Master", "Volume", volume);
m_config->write_num_entry("Master", "Volume", static_cast<int>(volume * 100));
request_setting_sync();
ClientConnection::for_each([&](ClientConnection& client) {
client.did_change_main_mix_volume({}, m_main_volume);
client.did_change_main_mix_volume({}, main_volume());
});
}

View file

@ -8,6 +8,7 @@
#pragma once
#include "ClientConnection.h"
#include "FadingProperty.h"
#include <AK/Atomic.h>
#include <AK/Badge.h>
#include <AK/ByteBuffer.h>
@ -23,6 +24,10 @@
namespace AudioServer {
// Headroom, i.e. fixed attenuation for all audio streams.
// This is to prevent clipping when two streams with low headroom (e.g. normalized & compressed) are playing.
constexpr double SAMPLE_HEADROOM = 0.7;
class ClientConnection;
class BufferQueue : public RefCounted<BufferQueue> {
@ -82,6 +87,10 @@ public:
return -1;
}
FadingProperty<double>& volume() { return m_volume; }
double volume() const { return m_volume; }
void set_volume(double const volume) { m_volume = volume; }
private:
RefPtr<Audio::Buffer> m_current;
Queue<NonnullRefPtr<Audio::Buffer>> m_queue;
@ -89,7 +98,9 @@ private:
int m_remaining_samples { 0 };
int m_played_samples { 0 };
bool m_paused { false };
WeakPtr<ClientConnection> m_client;
FadingProperty<double> m_volume { 1 };
};
class Mixer : public Core::Object {
@ -100,8 +111,9 @@ public:
NonnullRefPtr<BufferQueue> create_queue(ClientConnection&);
int main_volume() const { return m_main_volume; }
void set_main_volume(int volume);
// To the outside world, we pretend that the target volume is already reached, even though it may be still fading.
double main_volume() const { return m_main_volume.target(); }
void set_main_volume(double volume);
bool is_muted() const { return m_muted; }
void set_muted(bool);
@ -122,7 +134,7 @@ private:
NonnullRefPtr<Threading::Thread> m_sound_thread;
bool m_muted { false };
int m_main_volume { 100 };
FadingProperty<double> m_main_volume { 1 };
NonnullRefPtr<Core::ConfigFile> m_config;
RefPtr<Core::Timer> m_config_write_timer;