
AK: Add global FlatPtr typedef. It's u32 or u64, based on sizeof(void*)

Use this instead of uintptr_t throughout the codebase. This makes it
possible to pass a FlatPtr to something that has u32 and u64 overloads.
Andreas Kling 2020-03-08 10:36:51 +01:00
parent b98d8ad5b0
commit b1058b33fb
36 changed files with 164 additions and 161 deletions
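For context, below is a minimal sketch of what such a typedef and the overload-resolution benefit described above can look like. It is illustrative only: the exact spelling added to AK/Types.h may differ, it assumes a GCC/Clang-style __SIZEOF_POINTER__ macro, and print_hex() is a hypothetical function rather than part of AK.

#include <cstdint>
#include <cstdio>

typedef uint32_t u32;
typedef uint64_t u64;

// FlatPtr mirrors the platform pointer width: u32 on 32-bit targets, u64 on 64-bit targets.
#if __SIZEOF_POINTER__ == 8
typedef u64 FlatPtr;
#else
typedef u32 FlatPtr;
#endif

static_assert(sizeof(FlatPtr) == sizeof(void*), "FlatPtr must be pointer-sized");

// An overload pair of the kind the commit message refers to. Because FlatPtr is an
// alias for exactly one of u32/u64, the call below is an exact match for one overload,
// whereas uintptr_t is often typedef'd to a type (e.g. unsigned long) that matches
// neither overload exactly.
static void print_hex(u32 value) { std::printf("u32: %#x\n", (unsigned)value); }
static void print_hex(u64 value) { std::printf("u64: %#llx\n", (unsigned long long)value); }

int main()
{
    int object = 0;
    print_hex((FlatPtr)&object); // resolves to the overload matching the platform word size
    return 0;
}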

View file

@@ -247,7 +247,7 @@ namespace ACPI {
dbg() << "ACPI: Looking for RSDP in EBDA @ V " << (void*)rsdp_str << ", P " << (void*)p_rsdp_str;
#endif
if (!strncmp("RSD PTR ", rsdp_str, strlen("RSD PTR ")))
-return PhysicalAddress((uintptr_t)p_rsdp_str);
+return PhysicalAddress((FlatPtr)p_rsdp_str);
p_rsdp_str += 16;
}
return {};
@@ -262,7 +262,7 @@ namespace ACPI {
dbg() << "ACPI: Looking for RSDP in BIOS ROM area @ V " << (void*)rsdp_str << ", P " << (void*)p_rsdp_str;
#endif
if (!strncmp("RSD PTR ", rsdp_str, strlen("RSD PTR ")))
-return PhysicalAddress((uintptr_t)p_rsdp_str);
+return PhysicalAddress((FlatPtr)p_rsdp_str);
p_rsdp_str += 16;
}
return {};
@@ -320,8 +320,8 @@ namespace ACPI {
auto main_sdt_region = MM.allocate_kernel_region(xsdt.page_base(), PAGE_SIZE, "ACPI Static Parsing search_table_in_xsdt()", Region::Access::Read, false, true);
auto* xsdt_ptr = (volatile Structures::XSDT*)main_sdt_region->vaddr().offset(xsdt.offset_in_page().get()).as_ptr();
for (u32 i = 0; i < ((xsdt_ptr->h.length - sizeof(Structures::SDTHeader)) / sizeof(u64)); i++) {
-if (match_table_signature(PhysicalAddress((uintptr_t)xsdt_ptr->table_ptrs[i]), signature))
-return PhysicalAddress((uintptr_t)xsdt_ptr->table_ptrs[i]);
+if (match_table_signature(PhysicalAddress((FlatPtr)xsdt_ptr->table_ptrs[i]), signature))
+return PhysicalAddress((FlatPtr)xsdt_ptr->table_ptrs[i]);
}
return {};
}
@@ -347,8 +347,8 @@ namespace ACPI {
auto* rsdt_ptr = (volatile Structures::RSDT*)main_sdt_region->vaddr().offset(rsdt.offset_in_page().get()).as_ptr();
for (u32 i = 0; i < ((rsdt_ptr->h.length - sizeof(Structures::SDTHeader)) / sizeof(u32)); i++) {
-if (match_table_signature(PhysicalAddress((uintptr_t)rsdt_ptr->table_ptrs[i]), signature))
-return PhysicalAddress((uintptr_t)rsdt_ptr->table_ptrs[i]);
+if (match_table_signature(PhysicalAddress((FlatPtr)rsdt_ptr->table_ptrs[i]), signature))
+return PhysicalAddress((FlatPtr)rsdt_ptr->table_ptrs[i]);
}
return {};
}

View file

@@ -134,7 +134,7 @@ void DMIDecoder::enumerate_smbios_tables()
size_t table_size = get_table_size(p_table);
p_table = p_table.offset(table_size);
-v_table_ptr = (SMBIOS::TableHeader*)((uintptr_t)v_table_ptr + table_size);
+v_table_ptr = (SMBIOS::TableHeader*)((FlatPtr)v_table_ptr + table_size);
#ifdef SMBIOS_DEBUG
dbg() << "DMIDecoder: Next table @ P 0x" << p_table.get();
#endif
@@ -221,7 +221,7 @@ PhysicalAddress DMIDecoder::find_entry64bit_point()
dbg() << "DMI Decoder: Looking for 64 bit Entry point @ V " << (void*)entry_str << " P " << (void*)tested_physical_ptr;
#endif
if (!strncmp("_SM3_", entry_str, strlen("_SM3_")))
-return PhysicalAddress((uintptr_t)tested_physical_ptr);
+return PhysicalAddress((FlatPtr)tested_physical_ptr);
tested_physical_ptr += 16;
}
@@ -239,7 +239,7 @@ PhysicalAddress DMIDecoder::find_entry32bit_point()
dbg() << "DMI Decoder: Looking for 32 bit Entry point @ V " << (void*)entry_str << " P " << (void*)tested_physical_ptr;
#endif
if (!strncmp("_SM_", entry_str, strlen("_SM_")))
-return PhysicalAddress((uintptr_t)tested_physical_ptr);
+return PhysicalAddress((FlatPtr)tested_physical_ptr);
tested_physical_ptr += 16;
}
@@ -264,7 +264,7 @@ u64 DMIDecoder::get_bios_characteristics()
auto* bios_info = (SMBIOS::BIOSInfo*)get_smbios_physical_table_by_type(0).as_ptr();
ASSERT(bios_info != nullptr);
klog() << "DMIDecoder: BIOS info @ " << PhysicalAddress((uintptr_t)bios_info);
klog() << "DMIDecoder: BIOS info @ " << PhysicalAddress((FlatPtr)bios_info);
return bios_info->bios_characteristics;
}

View file

@@ -45,9 +45,9 @@ void MultiProcessorParser::initialize()
MultiProcessorParser::MultiProcessorParser()
: m_floating_pointer(search_floating_pointer())
-, m_operable((m_floating_pointer != (uintptr_t) nullptr))
+, m_operable((m_floating_pointer != (FlatPtr) nullptr))
{
-if (m_floating_pointer != (uintptr_t) nullptr) {
+if (m_floating_pointer != (FlatPtr) nullptr) {
klog() << "MultiProcessor: Floating Pointer Structure @ " << PhysicalAddress(m_floating_pointer);
parse_floating_pointer_data();
parse_configuration_table();
@@ -89,7 +89,7 @@ void MultiProcessorParser::parse_configuration_table()
p_entry = (MultiProcessor::EntryHeader*)(u32)p_entry + (u8)MultiProcessor::ConfigurationTableEntryLength::Processor;
break;
case ((u8)MultiProcessor::ConfigurationTableEntryType::Bus):
-m_bus_entries.append((uintptr_t)p_entry);
+m_bus_entries.append((FlatPtr)p_entry);
entry = (MultiProcessor::EntryHeader*)(u32)entry + (u8)MultiProcessor::ConfigurationTableEntryLength::Bus;
p_entry = (MultiProcessor::EntryHeader*)(u32)p_entry + (u8)MultiProcessor::ConfigurationTableEntryLength::Bus;
break;
@@ -98,7 +98,7 @@ void MultiProcessorParser::parse_configuration_table()
p_entry = (MultiProcessor::EntryHeader*)(u32)p_entry + (u8)MultiProcessor::ConfigurationTableEntryLength::IOAPIC;
break;
case ((u8)MultiProcessor::ConfigurationTableEntryType::IO_Interrupt_Assignment):
-m_io_interrupt_redirection_entries.append((uintptr_t)p_entry);
+m_io_interrupt_redirection_entries.append((FlatPtr)p_entry);
entry = (MultiProcessor::EntryHeader*)(u32)entry + (u8)MultiProcessor::ConfigurationTableEntryLength::IO_Interrupt_Assignment;
p_entry = (MultiProcessor::EntryHeader*)(u32)p_entry + (u8)MultiProcessor::ConfigurationTableEntryLength::IO_Interrupt_Assignment;
break;
@@ -124,20 +124,20 @@ void MultiProcessorParser::parse_configuration_table()
}
}
-uintptr_t MultiProcessorParser::search_floating_pointer()
+FlatPtr MultiProcessorParser::search_floating_pointer()
{
-uintptr_t mp_floating_pointer = (uintptr_t) nullptr;
+FlatPtr mp_floating_pointer = (FlatPtr) nullptr;
auto region = MM.allocate_kernel_region(PhysicalAddress(0), PAGE_SIZE, "MultiProcessor Parser Floating Pointer Structure Finding", Region::Access::Read);
u16 ebda_seg = (u16) * ((uint16_t*)((region->vaddr().get() & PAGE_MASK) + 0x40e));
klog() << "MultiProcessor: Probing EBDA, Segment 0x" << String::format("%x", ebda_seg);
mp_floating_pointer = search_floating_pointer_in_ebda(ebda_seg);
-if (mp_floating_pointer != (uintptr_t) nullptr)
+if (mp_floating_pointer != (FlatPtr) nullptr)
return mp_floating_pointer;
return search_floating_pointer_in_bios_area();
}
-uintptr_t MultiProcessorParser::search_floating_pointer_in_ebda(u16 ebda_segment)
+FlatPtr MultiProcessorParser::search_floating_pointer_in_ebda(u16 ebda_segment)
{
auto floating_pointer_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)(ebda_segment << 4))), PAGE_ROUND_UP(1024), "MultiProcessor Parser floating_pointer Finding #1", Region::Access::Read, false, true);
char* p_floating_pointer_str = (char*)(PhysicalAddress(ebda_segment << 4).as_ptr());
@@ -146,12 +146,12 @@ uintptr_t MultiProcessorParser::search_floating_pointer_in_ebda(u16 ebda_segment
dbg() << "MultiProcessor: Looking for floating pointer structure in EBDA @ V0x " << String::format("%x", floating_pointer_str) << ", P0x" << String::format("%x", p_floating_pointer_str);
#endif
if (!strncmp("_MP_", floating_pointer_str, strlen("_MP_")))
-return (uintptr_t)p_floating_pointer_str;
+return (FlatPtr)p_floating_pointer_str;
p_floating_pointer_str += 16;
}
-return (uintptr_t) nullptr;
+return (FlatPtr) nullptr;
}
-uintptr_t MultiProcessorParser::search_floating_pointer_in_bios_area()
+FlatPtr MultiProcessorParser::search_floating_pointer_in_bios_area()
{
auto floating_pointer_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of((u32)0xE0000)), PAGE_ROUND_UP(0xFFFFF - 0xE0000), "MultiProcessor Parser floating_pointer Finding #2", Region::Access::Read, false, true);
char* p_floating_pointer_str = (char*)(PhysicalAddress(0xE0000).as_ptr());
@@ -160,10 +160,10 @@ uintptr_t MultiProcessorParser::search_floating_pointer_in_bios_area()
dbg() << "MultiProcessor: Looking for floating pointer structure in BIOS area @ V0x " << String::format("%x", floating_pointer_str) << ", P0x" << String::format("%x", p_floating_pointer_str);
#endif
if (!strncmp("_MP_", floating_pointer_str, strlen("_MP_")))
-return (uintptr_t)p_floating_pointer_str;
+return (FlatPtr)p_floating_pointer_str;
p_floating_pointer_str += 16;
}
-return (uintptr_t) nullptr;
+return (FlatPtr) nullptr;
}
Vector<unsigned> MultiProcessorParser::get_pci_bus_ids()

View file

@@ -225,14 +225,14 @@ protected:
Vector<unsigned> get_pci_bus_ids();
-uintptr_t search_floating_pointer();
-uintptr_t search_floating_pointer_in_ebda(u16 ebda_segment);
-uintptr_t search_floating_pointer_in_bios_area();
+FlatPtr search_floating_pointer();
+FlatPtr search_floating_pointer_in_ebda(u16 ebda_segment);
+FlatPtr search_floating_pointer_in_bios_area();
-uintptr_t m_floating_pointer;
-uintptr_t m_configuration_table;
-Vector<uintptr_t> m_io_interrupt_redirection_entries;
-Vector<uintptr_t> m_bus_entries;
+FlatPtr m_floating_pointer;
+FlatPtr m_configuration_table;
+Vector<FlatPtr> m_io_interrupt_redirection_entries;
+Vector<FlatPtr> m_bus_entries;
bool m_operable;
size_t m_configuration_table_length;

View file

@@ -33,7 +33,7 @@
#define PAGE_SIZE 4096
#define GENERIC_INTERRUPT_HANDLERS_COUNT 128
-#define PAGE_MASK ((uintptr_t)0xfffff000u)
+#define PAGE_MASK ((FlatPtr)0xfffff000u)
namespace Kernel {
@@ -451,24 +451,24 @@ struct [[gnu::aligned(16)]] FPUState
u8 buffer[512];
};
-inline constexpr uintptr_t page_base_of(uintptr_t address)
+inline constexpr FlatPtr page_base_of(FlatPtr address)
{
return address & PAGE_MASK;
}
-inline uintptr_t page_base_of(const void* address)
+inline FlatPtr page_base_of(const void* address)
{
-return page_base_of((uintptr_t)address);
+return page_base_of((FlatPtr)address);
}
-inline constexpr uintptr_t offset_in_page(uintptr_t address)
+inline constexpr FlatPtr offset_in_page(FlatPtr address)
{
return address & (~PAGE_MASK);
}
-inline uintptr_t offset_in_page(const void* address)
+inline FlatPtr offset_in_page(const void* address)
{
-return offset_in_page((uintptr_t)address);
+return offset_in_page((FlatPtr)address);
}
u32 read_cr3();

View file

@@ -188,7 +188,7 @@ void kfree(void* ptr)
++g_kfree_call_count;
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
-uintptr_t start = ((uintptr_t)a - (uintptr_t)BASE_PHYSICAL) / CHUNK_SIZE;
+FlatPtr start = ((FlatPtr)a - (FlatPtr)BASE_PHYSICAL) / CHUNK_SIZE;
for (size_t k = start; k < (start + a->allocation_size_in_chunks); ++k)
alloc_map[k / 8] &= ~(1 << (k % 8));

View file

@@ -250,14 +250,14 @@ bool E1000NetworkAdapter::link_up()
void E1000NetworkAdapter::initialize_rx_descriptors()
{
-auto ptr = (uintptr_t)kmalloc_eternal(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16);
+auto ptr = (FlatPtr)kmalloc_eternal(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16);
// Make sure it's 16-byte aligned.
if (ptr % 16)
ptr = (ptr + 16) - (ptr % 16);
m_rx_descriptors = (e1000_rx_desc*)ptr;
for (int i = 0; i < number_of_rx_descriptors; ++i) {
auto& descriptor = m_rx_descriptors[i];
-auto addr = (uintptr_t)kmalloc_eternal(8192 + 16);
+auto addr = (FlatPtr)kmalloc_eternal(8192 + 16);
if (addr % 16)
addr = (addr + 16) - (addr % 16);
descriptor.addr = addr - 0xc0000000;
@@ -275,14 +275,14 @@ void E1000NetworkAdapter::initialize_rx_descriptors()
void E1000NetworkAdapter::initialize_tx_descriptors()
{
-auto ptr = (uintptr_t)kmalloc_eternal(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16);
+auto ptr = (FlatPtr)kmalloc_eternal(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16);
// Make sure it's 16-byte aligned.
if (ptr % 16)
ptr = (ptr + 16) - (ptr % 16);
m_tx_descriptors = (e1000_tx_desc*)ptr;
for (int i = 0; i < number_of_tx_descriptors; ++i) {
auto& descriptor = m_tx_descriptors[i];
-auto addr = (uintptr_t)kmalloc_eternal(8192 + 16);
+auto addr = (FlatPtr)kmalloc_eternal(8192 + 16);
if (addr % 16)
addr = (addr + 16) - (addr % 16);
descriptor.addr = addr - 0xc0000000;

View file

@@ -154,16 +154,16 @@ RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address address, u8 irq)
// we add space to account for overhang from the last packet - the rtl8139
// can optionally guarantee that packets will be contiguous by
// purposefully overrunning the rx buffer
-m_rx_buffer_addr = (uintptr_t)virtual_to_low_physical(kmalloc_aligned(RX_BUFFER_SIZE + PACKET_SIZE_MAX, 16));
+m_rx_buffer_addr = (FlatPtr)virtual_to_low_physical(kmalloc_aligned(RX_BUFFER_SIZE + PACKET_SIZE_MAX, 16));
klog() << "RTL8139: RX buffer: " << PhysicalAddress(m_rx_buffer_addr);
-auto tx_buffer_addr = (uintptr_t)virtual_to_low_physical(kmalloc_aligned(TX_BUFFER_SIZE * 4, 16));
+auto tx_buffer_addr = (FlatPtr)virtual_to_low_physical(kmalloc_aligned(TX_BUFFER_SIZE * 4, 16));
for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
m_tx_buffer_addr[i] = tx_buffer_addr + TX_BUFFER_SIZE * i;
klog() << "RTL8139: TX buffer " << i << ": " << PhysicalAddress(m_tx_buffer_addr[i]);
}
-m_packet_buffer = (uintptr_t)kmalloc(PACKET_SIZE_MAX);
+m_packet_buffer = (FlatPtr)kmalloc(PACKET_SIZE_MAX);
reset();

View file

@@ -37,7 +37,7 @@ PerformanceEventBuffer::PerformanceEventBuffer()
{
}
-KResult PerformanceEventBuffer::append(int type, uintptr_t arg1, uintptr_t arg2)
+KResult PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2)
{
if (count() >= capacity())
return KResult(-ENOBUFS);
@@ -63,17 +63,17 @@ KResult PerformanceEventBuffer::append(int type, uintptr_t arg1, uintptr_t arg2)
return KResult(-EINVAL);
}
-uintptr_t ebp;
+FlatPtr ebp;
asm volatile("movl %%ebp, %%eax"
: "=a"(ebp));
-//copy_from_user(&ebp, (uintptr_t*)current->get_register_dump_from_stack().ebp);
-Vector<uintptr_t> backtrace;
+//copy_from_user(&ebp, (FlatPtr*)current->get_register_dump_from_stack().ebp);
+Vector<FlatPtr> backtrace;
{
SmapDisabler disabler;
backtrace = Thread::current->raw_backtrace(ebp);
}
-event.stack_size = min(sizeof(event.stack) / sizeof(uintptr_t), static_cast<size_t>(backtrace.size()));
-memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(uintptr_t));
+event.stack_size = min(sizeof(event.stack) / sizeof(FlatPtr), static_cast<size_t>(backtrace.size()));
+memcpy(event.stack, backtrace.data(), event.stack_size * sizeof(FlatPtr));
#ifdef VERY_DEBUG
for (size_t i = 0; i < event.stack_size; ++i)

View file

@@ -34,13 +34,13 @@ namespace Kernel {
struct [[gnu::packed]] MallocPerformanceEvent
{
size_t size;
-uintptr_t ptr;
+FlatPtr ptr;
};
struct [[gnu::packed]] FreePerformanceEvent
{
size_t size;
-uintptr_t ptr;
+FlatPtr ptr;
};
struct [[gnu::packed]] PerformanceEvent
@@ -52,14 +52,14 @@ struct [[gnu::packed]] PerformanceEvent
MallocPerformanceEvent malloc;
FreePerformanceEvent free;
} data;
-uintptr_t stack[32];
+FlatPtr stack[32];
};
class PerformanceEventBuffer {
public:
PerformanceEventBuffer();
-KResult append(int type, uintptr_t arg1, uintptr_t arg2);
+KResult append(int type, FlatPtr arg1, FlatPtr arg2);
size_t capacity() const { return m_buffer.size() / sizeof(PerformanceEvent); }
size_t count() const { return m_count; }

View file

@@ -393,7 +393,7 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params)
if (size == 0)
return (void*)-EINVAL;
-if ((uintptr_t)addr & ~PAGE_MASK)
+if ((FlatPtr)addr & ~PAGE_MASK)
return (void*)-EINVAL;
bool map_shared = flags & MAP_SHARED;
@@ -1324,7 +1324,7 @@ Process* Process::create_user_process(Thread*& first_thread, const String& path,
Process* Process::create_kernel_process(Thread*& first_thread, String&& name, void (*e)())
{
auto* process = new Process(first_thread, move(name), (uid_t)0, (gid_t)0, (pid_t)0, Ring0);
-first_thread->tss().eip = (uintptr_t)e;
+first_thread->tss().eip = (FlatPtr)e;
if (process->pid() != 0) {
InterruptDisabler disabler;
@@ -1474,7 +1474,7 @@ int Process::sys$sigreturn(RegisterState& registers)
stack_ptr++;
//pop edi, esi, ebp, esp, ebx, edx, ecx and eax
-memcpy(&registers.edi, stack_ptr, 8 * sizeof(uintptr_t));
+memcpy(&registers.edi, stack_ptr, 8 * sizeof(FlatPtr));
stack_ptr += 8;
registers.eip = *stack_ptr;
@@ -3740,13 +3740,13 @@ int Process::sys$create_thread(void* (*entry)(void*), void* argument, const Sysc
thread->set_joinable(is_thread_joinable);
auto& tss = thread->tss();
-tss.eip = (uintptr_t)entry;
+tss.eip = (FlatPtr)entry;
tss.eflags = 0x0202;
tss.cr3 = page_directory().cr3();
tss.esp = user_stack_address;
// NOTE: The stack needs to be 16-byte aligned.
-thread->push_value_on_stack((uintptr_t)argument);
+thread->push_value_on_stack((FlatPtr)argument);
thread->push_value_on_stack(0);
thread->make_thread_specific_region({});
@@ -4531,7 +4531,7 @@ Thread& Process::any_thread()
WaitQueue& Process::futex_queue(i32* userspace_address)
{
-auto& queue = m_futex_queues.ensure((uintptr_t)userspace_address);
+auto& queue = m_futex_queues.ensure((FlatPtr)userspace_address);
if (!queue)
queue = make<WaitQueue>();
return *queue;
@@ -4793,7 +4793,7 @@ int Process::sys$unveil(const Syscall::SC_unveil_params* user_params)
return 0;
}
-int Process::sys$perf_event(int type, uintptr_t arg1, uintptr_t arg2)
+int Process::sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2)
{
if (!m_perf_event_buffer)
m_perf_event_buffer = make<PerformanceEventBuffer>();

View file

@@ -297,7 +297,7 @@ public:
int sys$chroot(const char* path, size_t path_length, int mount_flags);
int sys$pledge(const Syscall::SC_pledge_params*);
int sys$unveil(const Syscall::SC_unveil_params*);
-int sys$perf_event(int type, uintptr_t arg1, uintptr_t arg2);
+int sys$perf_event(int type, FlatPtr arg1, FlatPtr arg2);
template<bool sockname, typename Params>
int get_sock_or_peer_name(const Params&);

View file

@@ -625,10 +625,10 @@ void Thread::set_default_signal_dispositions()
m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}
-void Thread::push_value_on_stack(uintptr_t value)
+void Thread::push_value_on_stack(FlatPtr value)
{
m_tss.esp -= 4;
-uintptr_t* stack_ptr = (uintptr_t*)m_tss.esp;
+FlatPtr* stack_ptr = (FlatPtr*)m_tss.esp;
copy_to_user(stack_ptr, &value);
}
@@ -681,9 +681,9 @@ u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vecto
};
// NOTE: The stack needs to be 16-byte aligned.
-push_on_new_stack((uintptr_t)env);
-push_on_new_stack((uintptr_t)argv);
-push_on_new_stack((uintptr_t)argc);
+push_on_new_stack((FlatPtr)env);
+push_on_new_stack((FlatPtr)argv);
+push_on_new_stack((FlatPtr)argc);
push_on_new_stack(0);
return new_esp;
}
@@ -797,20 +797,20 @@ String Thread::backtrace_impl() const
auto elf_bundle = process.elf_bundle();
ProcessPagingScope paging_scope(process);
-uintptr_t stack_ptr = start_frame;
+FlatPtr stack_ptr = start_frame;
for (;;) {
if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
break;
-uintptr_t retaddr;
+FlatPtr retaddr;
-if (is_user_range(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2)) {
-copy_from_user(&retaddr, &((uintptr_t*)stack_ptr)[1]);
+if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
+copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
-copy_from_user(&stack_ptr, (uintptr_t*)stack_ptr);
+copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
} else {
-memcpy(&retaddr, &((uintptr_t*)stack_ptr)[1], sizeof(uintptr_t));
+memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
-memcpy(&stack_ptr, (uintptr_t*)stack_ptr, sizeof(uintptr_t));
+memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
}
}
@@ -822,15 +822,15 @@ String Thread::backtrace_impl() const
return builder.to_string();
}
-Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const
+Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp) const
{
InterruptDisabler disabler;
auto& process = const_cast<Process&>(this->process());
ProcessPagingScope paging_scope(process);
-Vector<uintptr_t, Profiling::max_stack_frame_count> backtrace;
+Vector<FlatPtr, Profiling::max_stack_frame_count> backtrace;
backtrace.append(ebp);
-for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2) && MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
-uintptr_t retaddr = stack_ptr[1];
+for (FlatPtr* stack_ptr = (FlatPtr*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2) && MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2); stack_ptr = (FlatPtr*)*stack_ptr) {
+FlatPtr retaddr = stack_ptr[1];
backtrace.append(retaddr);
if (backtrace.size() == Profiling::max_stack_frame_count)
break;

View file

@@ -97,7 +97,7 @@ public:
const Process& process() const { return m_process; }
String backtrace(ProcessInspectionHandle&) const;
-Vector<uintptr_t> raw_backtrace(uintptr_t ebp) const;
+Vector<FlatPtr> raw_backtrace(FlatPtr ebp) const;
const String& name() const { return m_name; }
void set_name(StringView s) { m_name = s; }
@@ -360,7 +360,7 @@ public:
FPUState& fpu_state() { return *m_fpu_state; }
void set_default_signal_dispositions();
-void push_value_on_stack(uintptr_t);
+void push_value_on_stack(FlatPtr);
u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment);

View file

@@ -41,9 +41,9 @@
//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG
-extern uintptr_t start_of_kernel_text;
-extern uintptr_t start_of_kernel_data;
-extern uintptr_t end_of_kernel_bss;
+extern FlatPtr start_of_kernel_text;
+extern FlatPtr start_of_kernel_data;
+extern FlatPtr end_of_kernel_bss;
namespace Kernel {
@@ -72,14 +72,14 @@ MemoryManager::~MemoryManager()
void MemoryManager::protect_kernel_image()
{
// Disable writing to the kernel text and rodata segments.
-for (size_t i = (uintptr_t)&start_of_kernel_text; i < (uintptr_t)&start_of_kernel_data; i += PAGE_SIZE) {
+for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_writable(false);
}
if (g_cpu_supports_nx) {
// Disable execution of the kernel data and bss segments.
-for (size_t i = (uintptr_t)&start_of_kernel_data; i < (uintptr_t)&end_of_kernel_bss; i += PAGE_SIZE) {
+for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
pte.set_execute_disabled(true);
}
@@ -104,7 +104,7 @@ void MemoryManager::setup_low_identity_mapping()
if (g_cpu_supports_nx)
pde_zero.set_execute_disabled(true);
-for (uintptr_t offset = (1 * MB); offset < (2 * MB); offset += PAGE_SIZE) {
+for (FlatPtr offset = (1 * MB); offset < (2 * MB); offset += PAGE_SIZE) {
auto& page_table_page = m_low_page_table;
auto& pte = quickmap_pt(page_table_page->paddr())[offset / PAGE_SIZE];
pte.set_physical_page_base(offset);
@@ -132,7 +132,7 @@ void MemoryManager::parse_memory_map()
if ((mmap->addr + mmap->len) > 0xffffffff)
continue;
-auto diff = (uintptr_t)mmap->addr % PAGE_SIZE;
+auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
if (diff != 0) {
klog() << "MM: got an unaligned region base from the bootloader; correcting " << String::format("%p", mmap->addr) << " by " << diff << " bytes";
diff = PAGE_SIZE - diff;
@@ -149,7 +149,7 @@ void MemoryManager::parse_memory_map()
}
#ifdef MM_DEBUG
klog() << "MM: considering memory at " << String::format("%p", (uintptr_t)mmap->addr) << " - " << String::format("%p", (uintptr_t)(mmap->addr + mmap->len));
klog() << "MM: considering memory at " << String::format("%p", (FlatPtr)mmap->addr) << " - " << String::format("%p", (FlatPtr)(mmap->addr + mmap->len));
#endif
for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
@@ -196,7 +196,7 @@ const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, Vi
if (!pde.is_present())
return nullptr;
-return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
+return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
@@ -224,7 +224,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
page_directory.m_physical_pages.set(page_directory_index, move(page_table));
}
-return quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
+return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
void MemoryManager::initialize()

View file

@@ -32,9 +32,9 @@
namespace Kernel {
-static const uintptr_t userspace_range_base = 0x00800000;
-static const uintptr_t userspace_range_ceiling = 0xbe000000;
-static const uintptr_t kernelspace_range_base = 0xc0800000;
+static const FlatPtr userspace_range_base = 0x00800000;
+static const FlatPtr userspace_range_ceiling = 0xbe000000;
+static const FlatPtr kernelspace_range_base = 0xc0800000;
static HashMap<u32, PageDirectory*>& cr3_map()
{
@@ -60,9 +60,9 @@ PageDirectory::PageDirectory()
m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
// Adopt the page tables already set up by boot.S
-PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((uintptr_t)boot_pdpt));
-PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((uintptr_t)boot_pd0));
-PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((uintptr_t)boot_pd3));
+PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
+PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
+PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
klog() << "MM: boot_pdpt @ " << boot_pdpt_paddr;
klog() << "MM: boot_pd0 @ " << boot_pd0_paddr;
klog() << "MM: boot_pd3 @ " << boot_pd3_paddr;

View file

@@ -105,9 +105,9 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
ptrdiff_t local_offset = addr.get() - m_lower.get();
ASSERT(local_offset >= 0);
-ASSERT((uintptr_t)local_offset < (uintptr_t)(m_pages * PAGE_SIZE));
+ASSERT((FlatPtr)local_offset < (FlatPtr)(m_pages * PAGE_SIZE));
-auto page = (uintptr_t)local_offset / PAGE_SIZE;
+auto page = (FlatPtr)local_offset / PAGE_SIZE;
if (page < m_last)
m_last = page;

View file

@@ -112,8 +112,8 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
if (available_range.size() < (effective_size + alignment))
continue;
-uintptr_t initial_base = available_range.base().offset(offset_from_effective_base).get();
-uintptr_t aligned_base = round_up_to_power_of_two(initial_base, alignment);
+FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
+FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
Range allocated_range(VirtualAddress(aligned_base), size);
if (available_range == allocated_range) {