
Kernel: Move Random.{h,cpp} code to Security subdirectory

Liav A 2023-02-24 19:49:37 +02:00 committed by Jelle Raaijmakers
parent 1b04726c85
commit 490856453d
36 changed files with 35 additions and 35 deletions

Kernel/Security/Random.cpp Normal file

@@ -0,0 +1,176 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2020, Peter Elliott <pelliott@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Singleton.h>
#include <Kernel/Arch/Processor.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/Time/HPET.h>
# include <Kernel/Arch/x86_64/Time/RTC.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/ASM_wrapper.h>
#endif
#include <Kernel/Devices/Generic/RandomDevice.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/Random.h>
#include <Kernel/Time/TimeManagement.h>
namespace Kernel {
static Singleton<KernelRng> s_the;
static Atomic<u32, AK::MemoryOrder::memory_order_relaxed> s_next_random_value = 1;
KernelRng& KernelRng::the()
{
return *s_the;
}
UNMAP_AFTER_INIT KernelRng::KernelRng()
{
#if ARCH(X86_64)
if (Processor::current().has_feature(CPUFeature::RDSEED)) {
dmesgln("KernelRng: Using RDSEED as entropy source");
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
add_random_event(Kernel::read_rdseed(), i % 32);
}
} else if (Processor::current().has_feature(CPUFeature::RDRAND)) {
dmesgln("KernelRng: Using RDRAND as entropy source");
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
add_random_event(Kernel::read_rdrand(), i % 32);
}
} else if (TimeManagement::the().can_query_precise_time()) {
// Add HPET as entropy source if we don't have anything better.
dmesgln("KernelRng: Using HPET as entropy source");
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
u64 hpet_time = HPET::the().read_main_counter_unsafe();
add_random_event(hpet_time, i % 32);
}
} else {
// Fallback to RTC
dmesgln("KernelRng: Using RTC as entropy source (bad!)");
auto current_time = static_cast<u64>(RTC::now());
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
add_random_event(current_time, i % 32);
current_time *= 0x574au;
current_time += 0x40b2u;
}
}
#elif ARCH(AARCH64)
if (Processor::current().has_feature(CPUFeature::RNG)) {
dmesgln("KernelRng: Using RNDRRS as entropy source");
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
add_random_event(Aarch64::Asm::read_rndrrs(), i % 32);
}
} else {
// Fallback to TimeManagement as entropy
dmesgln("KernelRng: Using bad entropy source TimeManagement");
auto current_time = static_cast<u64>(TimeManagement::now().milliseconds_since_epoch());
for (size_t i = 0; i < pool_count * reseed_threshold; ++i) {
add_random_event(current_time, i % 32);
current_time *= 0x574au;
current_time += 0x40b2u;
}
}
#else
dmesgln("KernelRng: No entropy source available!");
#endif
}
void KernelRng::wait_for_entropy()
{
SpinlockLocker lock(get_lock());
if (!is_ready()) {
dbgln("Entropy starvation...");
m_seed_queue.wait_forever("KernelRng"sv);
}
}
void KernelRng::wake_if_ready()
{
VERIFY(get_lock().is_locked());
if (is_ready()) {
m_seed_queue.wake_all();
}
}
size_t EntropySource::next_source { static_cast<size_t>(EntropySource::Static::MaxHardcodedSourceIndex) };
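// NOTE: This fast path is a plain linear congruential generator (the classic
// `x * 1103515245 + 12345` recurrence), advanced with a compare-exchange loop
// so concurrent callers never consume the same state twice. It provides no
// cryptographic security and is only reached when the Fortuna-backed
// KernelRng cannot deliver.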
static void do_get_fast_random_bytes(Bytes buffer)
{
union {
u8 bytes[4];
u32 value;
} u;
size_t offset = 4;
for (size_t i = 0; i < buffer.size(); ++i) {
if (offset >= 4) {
auto current_next = s_next_random_value.load();
for (;;) {
auto new_next = current_next * 1103515245 + 12345;
if (s_next_random_value.compare_exchange_strong(current_next, new_next)) {
u.value = new_next;
break;
}
}
offset = 0;
}
buffer[i] = u.bytes[offset++];
}
}
bool get_good_random_bytes(Bytes buffer, bool allow_wait, bool fallback_to_fast)
{
bool result = false;
auto& kernel_rng = KernelRng::the();
// FIXME: What if interrupts are disabled because we're in an interrupt?
bool can_wait = Processor::are_interrupts_enabled();
if (!can_wait && allow_wait) {
// If we can't wait but the caller would be ok with it, then we
// need to definitely fallback to *something*, even if it's less
// secure...
fallback_to_fast = true;
}
if (can_wait && allow_wait) {
for (;;) {
{
if (kernel_rng.get_random_bytes(buffer)) {
result = true;
break;
}
}
kernel_rng.wait_for_entropy();
}
} else {
// We can't wait/block here, or we are not allowed to block/wait
if (kernel_rng.get_random_bytes(buffer)) {
result = true;
} else if (fallback_to_fast) {
// If interrupts are disabled
do_get_fast_random_bytes(buffer);
result = true;
}
}
// NOTE: The only case where this function should ever return false and
// not actually return random data is if fallback_to_fast == false and
// allow_wait == false and interrupts are enabled!
VERIFY(result || !fallback_to_fast);
return result;
}
void get_fast_random_bytes(Bytes buffer)
{
// Try to get good randomness, but don't block if we can't right now
// and allow falling back to fast randomness
auto result = get_good_random_bytes(buffer, false, true);
VERIFY(result);
}
}
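
For context, kernel callers are expected to go through the free-function helpers at the bottom of this file rather than touching KernelRng directly. A minimal sketch of both flavours, assuming hypothetical callers (make_cookie and fill_session_key are illustrative names, not part of this commit):

#include <Kernel/Security/Random.h>

namespace Kernel {

// Non-blocking randomness for identifiers, ASLR-style offsets, etc.
// get_fast_random<T>() never blocks and always succeeds.
static u32 make_cookie()
{
    return get_fast_random<u32>();
}

// Key material that must come from the seeded Fortuna PRNG:
// allow_wait = true, fallback_to_fast = false means "block until the RNG
// is ready, never degrade to the fast LCG path".
static bool fill_session_key(Bytes key)
{
    return get_good_random_bytes(key, /* allow_wait */ true, /* fallback_to_fast */ false);
}

}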

Kernel/Security/Random.h Normal file
@@ -0,0 +1,203 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2020, Peter Elliott <pelliott@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Assertions.h>
#include <AK/ByteBuffer.h>
#include <AK/Types.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/StdLib.h>
#include <LibCrypto/Cipher/AES.h>
#include <LibCrypto/Cipher/Cipher.h>
#include <LibCrypto/Hash/SHA2.h>
namespace Kernel {
template<typename CipherT, typename HashT, int KeySize>
class FortunaPRNG {
public:
constexpr static size_t pool_count = 32;
constexpr static size_t reseed_threshold = 16;
using CipherType = CipherT;
using BlockType = typename CipherT::BlockType;
using HashType = HashT;
using DigestType = typename HashT::DigestType;
// FIXME: Do something other than VERIFY()'ing in case of OOM.
FortunaPRNG()
: m_counter(ByteBuffer::create_zeroed(BlockType::block_size()).release_value_but_fixme_should_propagate_errors())
{
}
bool get_random_bytes(Bytes buffer)
{
SpinlockLocker lock(m_lock);
if (!is_ready())
return false;
if (m_p0_len >= reseed_threshold) {
this->reseed();
}
VERIFY(is_seeded());
// FIXME: More than 2^20 bytes cannot be generated without refreshing the key.
VERIFY(buffer.size() < (1 << 20));
typename CipherType::CTRMode cipher(m_key, KeySize, Crypto::Cipher::Intent::Encryption);
auto counter_span = m_counter.bytes();
cipher.key_stream(buffer, counter_span, &counter_span);
// Extract a new key from the prng stream.
Bytes key_span = m_key.bytes();
cipher.key_stream(key_span, counter_span, &counter_span);
return true;
}
template<typename T>
void add_random_event(T const& event_data, size_t pool)
{
pool %= pool_count;
if (pool == 0) {
m_p0_len++;
}
m_pools[pool].update(reinterpret_cast<u8 const*>(&event_data), sizeof(T));
}
[[nodiscard]] bool is_seeded() const
{
return m_reseed_number > 0;
}
[[nodiscard]] bool is_ready() const
{
VERIFY(m_lock.is_locked());
return is_seeded() || m_p0_len >= reseed_threshold;
}
Spinlock<LockRank::None>& get_lock() { return m_lock; }
private:
void reseed()
{
HashType new_key;
new_key.update(m_key);
for (size_t i = 0; i < pool_count; ++i) {
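// Fortuna's reseed schedule: pool i is only drained when the reseed count
// is a multiple of 2^i, so higher-numbered pools accumulate entropy over
// exponentially longer periods before being consumed.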
if (m_reseed_number % (1u << i) == 0) {
DigestType digest = m_pools[i].digest();
new_key.update(digest.immutable_data(), digest.data_length());
}
}
DigestType digest = new_key.digest();
if (m_key.size() == digest.data_length()) {
// Avoid reallocating, just overwrite the key.
m_key.overwrite(0, digest.immutable_data(), digest.data_length());
} else {
auto buffer_result = ByteBuffer::copy(digest.immutable_data(), digest.data_length());
// If there's no memory left to copy this into, bail out.
if (buffer_result.is_error())
return;
m_key = buffer_result.release_value();
}
m_reseed_number++;
m_p0_len = 0;
}
ByteBuffer m_counter;
size_t m_reseed_number { 0 };
size_t m_p0_len { 0 };
ByteBuffer m_key;
HashType m_pools[pool_count];
Spinlock<LockRank::None> m_lock {};
};
class KernelRng : public FortunaPRNG<Crypto::Cipher::AESCipher, Crypto::Hash::SHA256, 256> {
public:
KernelRng();
static KernelRng& the();
void wait_for_entropy();
void wake_if_ready();
private:
WaitQueue m_seed_queue;
};
class EntropySource {
template<typename T>
struct Event {
u64 timestamp;
size_t source;
T event_data;
};
public:
enum class Static : size_t {
Interrupts,
MaxHardcodedSourceIndex,
};
EntropySource()
: m_source(next_source++)
{
}
EntropySource(Static hardcoded_source)
: m_source(static_cast<size_t>(hardcoded_source))
{
}
template<typename T>
void add_random_event(T const& event_data)
{
auto& kernel_rng = KernelRng::the();
SpinlockLocker lock(kernel_rng.get_lock());
// We don't lock this because on the off chance a pool is corrupted, entropy isn't lost.
Event<T> event = { Processor::read_cpu_counter(), m_source, event_data };
kernel_rng.add_random_event(event, m_pool);
m_pool++;
kernel_rng.wake_if_ready();
}
private:
static size_t next_source;
size_t m_pool { 0 };
size_t m_source;
};
// NOTE: These APIs are primarily about expressing intent/needs in the calling code.
// The only difference is that get_fast_random is guaranteed not to block.
void get_fast_random_bytes(Bytes);
bool get_good_random_bytes(Bytes bytes, bool allow_wait = true, bool fallback_to_fast = true);
template<typename T>
inline T get_fast_random()
{
T value;
Bytes bytes { reinterpret_cast<u8*>(&value), sizeof(T) };
get_fast_random_bytes(bytes);
return value;
}
template<typename T>
inline T get_good_random()
{
T value;
Bytes bytes { reinterpret_cast<u8*>(&value), sizeof(T) };
get_good_random_bytes(bytes);
return value;
}
}
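
Drivers feed the pools through EntropySource: each source claims its own index and round-robins its events across the 32 Fortuna pools. A hedged sketch of how a device driver might wire this up (ExampleInputDevice and on_scancode are hypothetical; only EntropySource and add_random_event come from this header):

#include <Kernel/Security/Random.h>

namespace Kernel {

// Hypothetical input driver mixing scancodes into the kernel RNG.
class ExampleInputDevice {
public:
    void on_scancode(u8 scancode)
    {
        // EntropySource timestamps each event with the CPU counter and
        // spreads successive events across the 32 pools in round-robin order.
        m_entropy_source.add_random_event(scancode);
    }

private:
    EntropySource m_entropy_source;
};

}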