diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index 4ea7d65862..512c13da45 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -63,6 +63,8 @@ public:
         return 0;
     }
 
+    ALWAYS_INLINE static u64 read_cpu_counter() { return 0; }
+
     ALWAYS_INLINE static void enter_critical() { }
     ALWAYS_INLINE static void leave_critical() { }
     ALWAYS_INLINE static u32 in_critical()
diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index d7df2b257b..cd93b2bf37 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -158,6 +158,11 @@ public:
         return *g_total_processors.ptr();
     }
 
+    ALWAYS_INLINE static u64 read_cpu_counter()
+    {
+        return read_tsc();
+    }
+
     ALWAYS_INLINE static void pause()
     {
         asm volatile("pause");
diff --git a/Kernel/Random.h b/Kernel/Random.h
index 8aaf0eaaf7..0213b83c9d 100644
--- a/Kernel/Random.h
+++ b/Kernel/Random.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <Kernel/Arch/Processor.h>
 #include
 #include
 #include
@@ -163,7 +164,7 @@ public:
         auto& kernel_rng = KernelRng::the();
         SpinlockLocker lock(kernel_rng.get_lock());
         // We don't lock this because on the off chance a pool is corrupted, entropy isn't lost.
-        Event event = { read_tsc(), m_source, event_data };
+        Event event = { Processor::read_cpu_counter(), m_source, event_data };
         kernel_rng.add_random_event(event, m_pool);
         m_pool++;
         kernel_rng.wake_if_ready();