
Kernel: Move Kernel/Memory/ code into Kernel::Memory namespace

Andreas Kling 2021-08-06 13:49:36 +02:00
parent a1d7ebf85a
commit 93d98d4976
153 changed files with 473 additions and 467 deletions
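
The change is mechanical: declarations under Kernel/Memory/ move from the plain Kernel namespace into Kernel::Memory, and call sites that still live in Kernel gain a Memory:: qualifier. A minimal sketch of the pattern, using MappedROM and map_bios() from the diff below (bodies elided; this is an illustration, not code from the commit itself):

    // Before: memory-management types sat directly in the Kernel namespace.
    namespace Kernel {
    class MappedROM {
        // ...
    };
    }

    // After: the same declarations are wrapped in Kernel::Memory.
    namespace Kernel::Memory {
    class MappedROM {
        // ...
    };
    }

    // Call sites that remain in the Kernel namespace qualify the name.
    namespace Kernel {
    Memory::MappedROM map_bios();
    }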

View file

@@ -53,7 +53,7 @@ UNMAP_AFTER_INIT DMIEntryPointExposedBlob::DMIEntryPointExposedBlob(PhysicalAddr
 OwnPtr<KBuffer> DMIEntryPointExposedBlob::try_to_generate_buffer() const
 {
-    auto dmi_blob = map_typed<u8>((m_dmi_entry_point), m_dmi_entry_point_length);
+    auto dmi_blob = Memory::map_typed<u8>((m_dmi_entry_point), m_dmi_entry_point_length);
     return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_dmi_entry_point_length });
 }
@@ -71,14 +71,14 @@ UNMAP_AFTER_INIT SMBIOSExposedTable::SMBIOSExposedTable(PhysicalAddress smbios_s
 OwnPtr<KBuffer> SMBIOSExposedTable::try_to_generate_buffer() const
 {
-    auto dmi_blob = map_typed<u8>((m_smbios_structure_table), m_smbios_structure_table_length);
+    auto dmi_blob = Memory::map_typed<u8>((m_smbios_structure_table), m_smbios_structure_table_length);
     return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_smbios_structure_table_length });
 }
 
 UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_64_bit_entry_initialization_values()
 {
     dbgln("BIOSSysFSDirectory: SMBIOS 64bit Entry point @ {}", m_dmi_entry_point);
-    auto smbios_entry = map_typed<SMBIOS::EntryPoint64bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
+    auto smbios_entry = Memory::map_typed<SMBIOS::EntryPoint64bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
     m_smbios_structure_table = PhysicalAddress(smbios_entry.ptr()->table_ptr);
     m_dmi_entry_point_length = smbios_entry.ptr()->length;
     m_smbios_structure_table_length = smbios_entry.ptr()->table_maximum_size;
@@ -87,7 +87,7 @@ UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_64_bit_entry_initialization_va
 UNMAP_AFTER_INIT void BIOSSysFSDirectory::set_dmi_32_bit_entry_initialization_values()
 {
     dbgln("BIOSSysFSDirectory: SMBIOS 32bit Entry point @ {}", m_dmi_entry_point);
-    auto smbios_entry = map_typed<SMBIOS::EntryPoint32bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
+    auto smbios_entry = Memory::map_typed<SMBIOS::EntryPoint32bit>(m_dmi_entry_point, SMBIOS_SEARCH_AREA_SIZE);
     m_smbios_structure_table = PhysicalAddress(smbios_entry.ptr()->legacy_structure.smbios_table_ptr);
     m_dmi_entry_point_length = smbios_entry.ptr()->length;
     m_smbios_structure_table_length = smbios_entry.ptr()->legacy_structure.smboios_table_length;
@@ -130,7 +130,7 @@ UNMAP_AFTER_INIT void BIOSSysFSDirectory::initialize_dmi_exposer()
 OwnPtr<KBuffer> BIOSSysFSDirectory::smbios_structure_table() const
 {
-    auto dmi_blob = map_typed<u8>(m_smbios_structure_table, m_smbios_structure_table_length);
+    auto dmi_blob = Memory::map_typed<u8>(m_smbios_structure_table, m_smbios_structure_table_length);
     return KBuffer::try_create_with_bytes(Span<u8> { dmi_blob.ptr(), m_smbios_structure_table_length });
 }
@@ -160,26 +160,26 @@ UNMAP_AFTER_INIT Optional<PhysicalAddress> BIOSSysFSDirectory::find_dmi_entry32b
     return map_bios().find_chunk_starting_with("_SM_", 16);
 }
 
-MappedROM map_bios()
+Memory::MappedROM map_bios()
 {
-    MappedROM mapping;
+    Memory::MappedROM mapping;
     mapping.size = 128 * KiB;
     mapping.paddr = PhysicalAddress(0xe0000);
-    mapping.region = MM.allocate_kernel_region(mapping.paddr, page_round_up(mapping.size), {}, Region::Access::Read);
+    mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size), {}, Memory::Region::Access::Read);
     return mapping;
 }
 
-MappedROM map_ebda()
+Memory::MappedROM map_ebda()
 {
-    auto ebda_segment_ptr = map_typed<u16>(PhysicalAddress(0x40e));
-    auto ebda_length_ptr_b0 = map_typed<u8>(PhysicalAddress(0x413));
-    auto ebda_length_ptr_b1 = map_typed<u8>(PhysicalAddress(0x414));
+    auto ebda_segment_ptr = Memory::map_typed<u16>(PhysicalAddress(0x40e));
+    auto ebda_length_ptr_b0 = Memory::map_typed<u8>(PhysicalAddress(0x413));
+    auto ebda_length_ptr_b1 = Memory::map_typed<u8>(PhysicalAddress(0x414));
     PhysicalAddress ebda_paddr(*ebda_segment_ptr << 4);
     size_t ebda_size = (*ebda_length_ptr_b1 << 8) | *ebda_length_ptr_b0;
-    MappedROM mapping;
-    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), page_round_up(ebda_size), {}, Region::Access::Read);
+    Memory::MappedROM mapping;
+    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size), {}, Memory::Region::Access::Read);
     mapping.offset = ebda_paddr.offset_in_page();
     mapping.size = ebda_size;
     mapping.paddr = ebda_paddr;
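
For context on the map_ebda() hunk above: the 16-bit value at physical address 0x40e in the BIOS Data Area holds the EBDA's real-mode segment, and a real-mode segment becomes a physical address when shifted left by four bits; the function then composes its size from the two bytes at 0x413 and 0x414. A standalone sketch of that arithmetic (all values below are made-up examples, not read from real hardware):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Example value as if read from physical 0x40e; a real-mode
        // segment times 16 (<< 4) gives the physical base address.
        uint16_t ebda_segment = 0x9fc0;
        uint32_t ebda_paddr = uint32_t(ebda_segment) << 4; // 0x9fc00

        // map_ebda() builds its size from the bytes at 0x413 and 0x414.
        uint8_t length_b0 = 0x7f; // example low byte
        uint8_t length_b1 = 0x02; // example high byte
        uint32_t ebda_size = (uint32_t(length_b1) << 8) | length_b0; // 0x27f

        printf("EBDA base 0x%x, size 0x%x\n", ebda_paddr, ebda_size);
        return 0;
    }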

View file

@@ -55,8 +55,8 @@ struct [[gnu::packed]] EntryPoint64bit {
 namespace Kernel {
 
-MappedROM map_bios();
-MappedROM map_ebda();
+Memory::MappedROM map_bios();
+Memory::MappedROM map_ebda();
 
 class BIOSSysFSComponent : public SysFSComponent {
 public:

View file

@@ -8,13 +8,11 @@
 #include <AK/Badge.h>
 #include <AK/Types.h>
 #include <Kernel/Forward.h>
 #include <Kernel/PhysicalAddress.h>
 
 namespace Kernel {
 
-class PageDirectory;
-class PageTableEntry;
 
 class PageDirectoryEntry {
 public:
     PhysicalPtr page_table_base() const { return PhysicalAddress::physical_page_base(m_raw); }
@@ -28,7 +26,7 @@ public:
     void clear() { m_raw = 0; }
     u64 raw() const { return m_raw; }
-    void copy_from(Badge<PageDirectory>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
+    void copy_from(Badge<Memory::PageDirectory>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }
 
     enum Flags {
         Present = 1 << 0,
View file

@@ -57,7 +57,7 @@ struct ProcessorMessage {
     ProcessorMessage* next; // only valid while in the pool
     alignas(CallbackFunction) u8 callback_storage[sizeof(CallbackFunction)];
     struct {
-        const PageDirectory* page_directory;
+        Memory::PageDirectory const* page_directory;
         u8* ptr;
         size_t page_count;
     } flush_tlb;
@@ -211,7 +211,7 @@ public:
     }
 
     static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
-    static void flush_tlb(const PageDirectory*, VirtualAddress, size_t);
+    static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
 
     Descriptor& get_gdt_entry(u16 selector);
     void flush_gdt();
@@ -391,7 +391,7 @@ public:
     bool smp_process_pending_messages();
 
     static void smp_unicast(u32 cpu, Function<void()>, bool async);
-    static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t);
+    static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
     static u32 smp_wake_n_idle_processors(u32 wake_count);
 
     static void deferred_call_queue(Function<void()> callback);
View file

@@ -469,7 +469,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         if (max_frames != 0 && count > max_frames)
             break;
 
-        if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
+        if (Memory::is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
            if (!copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]) || !retaddr)
                break;
            stack_trace.append(retaddr);
@@ -545,7 +545,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
         ProcessPagingScope paging_scope(thread.process());
         auto& regs = thread.regs();
         FlatPtr* stack_top = reinterpret_cast<FlatPtr*>(regs.sp());
-        if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
+        if (Memory::is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
            if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
                frame_ptr = 0;
         } else {
@@ -657,9 +657,9 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
     }
 }
 
-void Processor::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void Processor::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
 {
-    if (s_smp_enabled && (!is_user_address(vaddr) || Process::current()->thread_count() > 1))
+    if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current()->thread_count() > 1))
         smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
     else
         flush_tlb_local(vaddr, page_count);
@@ -818,9 +818,9 @@ bool Processor::smp_process_pending_messages()
         msg->invoke_callback();
         break;
     case ProcessorMessage::FlushTlb:
-        if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
+        if (Memory::is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
            // We assume that we don't cross into kernel land!
-           VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
+           VERIFY(Memory::is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
            if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                // This processor isn't using this page directory right now, we can ignore this request
                dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
@@ -949,7 +949,7 @@ void Processor::smp_unicast(u32 cpu, Function<void()> callback, bool async)
     smp_unicast_message(cpu, msg, async);
 }
 
-void Processor::smp_broadcast_flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void Processor::smp_broadcast_flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
 {
     auto& msg = smp_get_from_pool();
     msg.async = false;
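
The flush_tlb() hunk above keeps the existing broadcast rule intact; only the names gain qualifiers. Distilled into a standalone predicate (function name hypothetical, logic taken from the diff): kernel mappings are visible to every processor, so on SMP they always warrant a broadcast, while a user-space mapping only concerns other processors if the process has more than one thread.

    // Hypothetical helper mirroring the condition in Processor::flush_tlb().
    bool needs_tlb_broadcast(bool smp_enabled, bool is_user_address, size_t thread_count)
    {
        // Kernel addresses are shared across CPUs; a user address only
        // needs a broadcast when the process may be running elsewhere too.
        return smp_enabled && (!is_user_address || thread_count > 1);
    }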