
AK: Rename the common integer typedefs to make it obvious what they are.

These types can be picked up by including <AK/Types.h>:

* u8, u16, u32, u64 (unsigned)
* i8, i16, i32, i64 (signed)
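As a quick illustration (a sketch, not part of the commit; the page_directory_index() helper below is hypothetical), the renamed types read like this in kernel code — the width and signedness are now visible in the name, where dword/word/byte required remembering the x86 meaning:

    #include <AK/Types.h>

    // The new names encode exactly what the old x86-flavored ones hid.
    static_assert(sizeof(u8) == 1, "u8 is 1 byte");
    static_assert(sizeof(u16) == 2, "u16 is 2 bytes");
    static_assert(sizeof(u32) == 4, "u32 is 4 bytes");
    static_assert(sizeof(u64) == 8, "u64 is 8 bytes");

    // Hypothetical helper: the same index math as MemoryManager::ensure_pte()
    // in the diff below, spelled with u32 where the old code said dword.
    static u32 page_directory_index(u32 vaddr)
    {
        return (vaddr >> 22) & 0x3ff;
    }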
Andreas Kling, 2019-07-03 21:17:35 +02:00
commit 27f699ef0c (parent c4c4bbc5ba)
208 changed files with 1603 additions and 1621 deletions

Kernel/VM/MemoryManager.cpp

@@ -84,11 +84,11 @@ void MemoryManager::initialize_paging()
     for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
-            (dword)(mmap->addr >> 32),
-            (dword)(mmap->addr & 0xffffffff),
-            (dword)(mmap->len >> 32),
-            (dword)(mmap->len & 0xffffffff),
-            (dword)mmap->type);
+            (u32)(mmap->addr >> 32),
+            (u32)(mmap->addr & 0xffffffff),
+            (u32)(mmap->len >> 32),
+            (u32)(mmap->len & 0xffffffff),
+            (u32)mmap->type);
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
@@ -99,7 +99,7 @@ void MemoryManager::initialize_paging()
 #ifdef MM_DEBUG
         kprintf("MM: considering memory at %p - %p\n",
-            (dword)mmap->addr, (dword)(mmap->addr + mmap->len));
+            (u32)mmap->addr, (u32)(mmap->addr + mmap->len));
 #endif
         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
@@ -163,7 +163,7 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
 {
     InterruptDisabler disabler;
     // FIXME: ASSERT(vaddr is 4KB aligned);
-    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
+    for (u32 offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
         auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(0);
@@ -177,8 +177,8 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Virtu
 PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    dword page_directory_index = (vaddr.get() >> 22) & 0x3ff;
-    dword page_table_index = (vaddr.get() >> 12) & 0x3ff;
+    u32 page_directory_index = (vaddr.get() >> 22) & 0x3ff;
+    u32 page_table_index = (vaddr.get() >> 12) & 0x3ff;
     PageDirectoryEntry& pde = page_directory.entries()[page_directory_index];
     if (!pde.is_present()) {
@@ -187,13 +187,13 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
 #endif
         if (page_directory_index == 0) {
             ASSERT(&page_directory == m_kernel_page_directory);
-            pde.set_page_table_base((dword)m_page_table_zero);
+            pde.set_page_table_base((u32)m_page_table_zero);
             pde.set_user_allowed(false);
             pde.set_present(true);
             pde.set_writable(true);
         } else if (page_directory_index == 1) {
             ASSERT(&page_directory == m_kernel_page_directory);
-            pde.set_page_table_base((dword)m_page_table_one);
+            pde.set_page_table_base((u32)m_page_table_one);
             pde.set_user_allowed(false);
             pde.set_present(true);
             pde.set_writable(true);
@@ -224,7 +224,7 @@ void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
 {
     InterruptDisabler disabler;
     ASSERT(vaddr.is_page_aligned());
-    for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
+    for (u32 offset = 0; offset < length; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
         auto& pte = ensure_pte(kernel_page_directory(), pte_address);
         pte.set_physical_page_base(pte_address.get());
@@ -239,7 +239,7 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
 {
     InterruptDisabler disabler;
     ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
-    for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
+    for (u32 offset = 0; offset < size; offset += PAGE_SIZE) {
         auto pte_address = vaddr.offset(offset);
         auto& pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(pte_address.get());
@@ -336,8 +336,8 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 #endif
     auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
     auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
-    byte* dest_ptr = quickmap_page(*physical_page);
-    const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
+    u8* dest_ptr = quickmap_page(*physical_page);
+    const u8* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> COW P%x <- P%x\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
 #endif
@@ -374,7 +374,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
     dbgprintf("MM: page_in_from_inode ready to read from inode\n");
 #endif
     sti();
-    byte page_buffer[PAGE_SIZE];
+    u8 page_buffer[PAGE_SIZE];
     auto& inode = *vmo.inode();
     auto nread = inode.read_bytes(vmo.inode_offset() + ((region.first_page_index() + page_index_in_region) * PAGE_SIZE), PAGE_SIZE, page_buffer, nullptr);
     if (nread < 0) {
@@ -392,7 +392,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
         return false;
     }
     remap_region_page(region, page_index_in_region, true);
-    byte* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
+    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
     memcpy(dest_ptr, page_buffer, PAGE_SIZE);
     return true;
 }
@@ -406,7 +406,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
 #endif
     ASSERT(fault.vaddr() != m_quickmap_addr);
     if (fault.is_not_present() && fault.vaddr().get() >= 0xc0000000) {
-        dword page_directory_index = (fault.vaddr().get() >> 22) & 0x3ff;
+        u32 page_directory_index = (fault.vaddr().get() >> 22) & 0x3ff;
         if (kernel_page_directory().entries()[page_directory_index].is_present()) {
             current->process().page_directory().entries()[page_directory_index].copy_from({}, kernel_page_directory().entries()[page_directory_index]);
             dbgprintf("NP(kernel): copying new kernel mapping for L%x into process\n", fault.vaddr().get());
@@ -510,8 +510,8 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
 #endif
     if (should_zero_fill == ShouldZeroFill::Yes) {
-        auto* ptr = (dword*)quickmap_page(*page);
-        fast_dword_fill(ptr, 0, PAGE_SIZE / sizeof(dword));
+        auto* ptr = (u32*)quickmap_page(*page);
+        fast_u32_fill(ptr, 0, PAGE_SIZE / sizeof(u32));
         unquickmap_page();
     }
@@ -563,7 +563,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
 #endif
-    fast_dword_fill((dword*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(dword));
+    fast_u32_fill((u32*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(u32));
     ++m_super_physical_pages_used;
     return page;
 }
@@ -603,7 +603,7 @@ void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr)
     flush_tlb(vaddr);
 }
-byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
+u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(!m_quickmap_in_use);
@@ -615,7 +615,7 @@ byte* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     pte.set_writable(true);
     pte.set_user_allowed(false);
     flush_tlb(page_vaddr);
-    ASSERT((dword)pte.physical_page_base() == physical_page.paddr().get());
+    ASSERT((u32)pte.physical_page_base() == physical_page.paddr().get());
 #ifdef MM_DEBUG
     dbgprintf("MM: >> quickmap_page L%x => P%x @ PTE=%p\n", page_vaddr, physical_page.paddr().get(), pte.ptr());
 #endif

Kernel/VM/MemoryManager.h

@@ -19,7 +19,7 @@
 #include <Kernel/VM/VMObject.h>
 #include <Kernel/VirtualAddress.h>
-#define PAGE_ROUND_UP(x) ((((dword)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
+#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
 class SynthFSInode;
@@ -108,7 +108,7 @@ private:
     bool page_in_from_inode(Region&, unsigned page_index_in_region);
     bool zero_page(Region& region, unsigned page_index_in_region);
-    byte* quickmap_page(PhysicalPage&);
+    u8* quickmap_page(PhysicalPage&);
     void unquickmap_page();
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

Kernel/VM/PageDirectory.cpp

@@ -3,8 +3,8 @@
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PageDirectory.h>
-static const dword userspace_range_base = 0x01000000;
-static const dword kernelspace_range_base = 0xc0000000;
+static const u32 userspace_range_base = 0x01000000;
+static const u32 kernelspace_range_base = 0xc0000000;
 PageDirectory::PageDirectory(PhysicalAddress paddr)
     : m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)

Kernel/VM/PageDirectory.h

@@ -14,7 +14,7 @@ public:
     static NonnullRefPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
     ~PageDirectory();
-    dword cr3() const { return m_directory_page->paddr().get(); }
+    u32 cr3() const { return m_directory_page->paddr().get(); }
     PageDirectoryEntry* entries() { return reinterpret_cast<PageDirectoryEntry*>(cr3()); }
     void flush(VirtualAddress);

Kernel/VM/PhysicalPage.h

@@ -30,7 +30,7 @@ public:
     static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);
-    word ref_count() const { return m_retain_count; }
+    u16 ref_count() const { return m_retain_count; }
 private:
     PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
@@ -38,7 +38,7 @@ private:
     void return_to_freelist() &&;
-    word m_retain_count { 1 };
+    u16 m_retain_count { 1 };
     bool m_may_return_to_freelist { true };
     bool m_supervisor { false };
     PhysicalAddress m_paddr;

Kernel/VM/Region.cpp

@@ -4,7 +4,7 @@
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/VMObject.h>
-Region::Region(const Range& range, const String& name, byte access, bool cow)
+Region::Region(const Range& range, const String& name, u8 access, bool cow)
     : m_range(range)
     , m_vmo(VMObject::create_anonymous(size()))
     , m_name(name)
@@ -15,7 +15,7 @@ Region::Region(const Range& range, const String& name, byte access, bool cow)
     MM.register_region(*this);
 }
-Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, byte access)
+Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access)
     : m_range(range)
     , m_vmo(VMObject::create_file_backed(move(inode)))
     , m_name(name)
@@ -25,7 +25,7 @@ Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, by
     MM.register_region(*this);
 }
-Region::Region(const Range& range, NonnullRefPtr<VMObject>&& vmo, size_t offset_in_vmo, const String& name, byte access, bool cow)
+Region::Region(const Range& range, NonnullRefPtr<VMObject>&& vmo, size_t offset_in_vmo, const String& name, u8 access, bool cow)
     : m_range(range)
     , m_offset_in_vmo(offset_in_vmo)
     , m_vmo(move(vmo))

Kernel/VM/Region.h

@@ -18,9 +18,9 @@ public:
         Execute = 4,
     };
-    Region(const Range&, const String&, byte access, bool cow = false);
-    Region(const Range&, NonnullRefPtr<VMObject>&&, size_t offset_in_vmo, const String&, byte access, bool cow = false);
-    Region(const Range&, RefPtr<Inode>&&, const String&, byte access);
+    Region(const Range&, const String&, u8 access, bool cow = false);
+    Region(const Range&, NonnullRefPtr<VMObject>&&, size_t offset_in_vmo, const String&, u8 access, bool cow = false);
+    Region(const Range&, RefPtr<Inode>&&, const String&, u8 access);
     ~Region();
     VirtualAddress vaddr() const { return m_range.base(); }
@@ -102,7 +102,7 @@ private:
     size_t m_offset_in_vmo { 0 };
     NonnullRefPtr<VMObject> m_vmo;
     String m_name;
-    byte m_access { 0 };
+    u8 m_access { 0 };
     bool m_shared { false };
     Bitmap m_cow_map;
 };

Kernel/VM/VMObject.cpp

@@ -118,7 +118,7 @@ void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size
     });
 }
-void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const byte* data)
+void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
 {
     (void)size;
     (void)data;
@@ -132,7 +132,7 @@ void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size,
 #if 0
     size_t current_offset = offset;
     size_t remaining_bytes = size;
-    const byte* data_ptr = data;
+    const u8* data_ptr = data;
     auto to_page_index = [] (size_t offset) -> size_t {
         return offset / PAGE_SIZE;

Kernel/VM/VMObject.h

@@ -37,7 +37,7 @@ public:
     const Vector<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }
     Vector<RefPtr<PhysicalPage>>& physical_pages() { return m_physical_pages; }
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const byte*);
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
     void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
     size_t size() const { return m_size; }