Kernel: Rename LinearAddress => VirtualAddress.

parent 0ed89440f1
commit e42c3b4fd7

33 changed files with 272 additions and 272 deletions
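The rename is mechanical: the kernel's wrapper type for virtual (historically "linear", in x86 terminology) addresses changes name, and spellings such as laddr(), preferred_laddr, and handler_laddr become vaddr(), preferred_vaddr, and handler_vaddr. As a rough, hypothetical sketch of what the change means at a call site (not a line taken from the diff below; it assumes the Region and kprintf APIs visible in the hunks):

// Illustration only: a hypothetical helper showing the renamed type and accessor
// (assumes the Region, VirtualAddress, and kprintf APIs visible in the hunks below).
static void dump_region(const Region& region)
{
    VirtualAddress base = region.vaddr(); // formerly: LinearAddress base = region.laddr();
    kprintf("region at L%x, size %u\n", base.get(), region.size());
}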
				
			
@@ -57,7 +57,7 @@ public:
         dword type() const { return m_program_header.p_type; }
         dword flags() const { return m_program_header.p_flags; }
         dword offset() const { return m_program_header.p_offset; }
-        LinearAddress laddr() const { return LinearAddress(m_program_header.p_vaddr); }
+        VirtualAddress vaddr() const { return VirtualAddress(m_program_header.p_vaddr); }
         dword size_in_memory() const { return m_program_header.p_memsz; }
         dword size_in_image() const { return m_program_header.p_filesz; }
         dword alignment() const { return m_program_header.p_align; }
@@ -117,7 +117,7 @@ public:
     bool is_executable() const { return header().e_type == ET_EXEC; }
     bool is_relocatable() const { return header().e_type == ET_REL; }

-    LinearAddress entry() const { return LinearAddress(header().e_entry); }
+    VirtualAddress entry() const { return VirtualAddress(header().e_entry); }

 private:
     bool parse_header();

@@ -34,21 +34,21 @@ bool ELFLoader::layout()
         if (program_header.type() != PT_LOAD)
             return;
 #ifdef ELFLOADER_DEBUG
-        kprintf("PH: L%x %u r:%u w:%u\n", program_header.laddr().get(), program_header.size_in_memory(), program_header.is_readable(), program_header.is_writable());
+        kprintf("PH: L%x %u r:%u w:%u\n", program_header.vaddr().get(), program_header.size_in_memory(), program_header.is_readable(), program_header.is_writable());
 #endif
         if (program_header.is_writable()) {
             alloc_section_hook(
-                program_header.laddr(),
+                program_header.vaddr(),
                 program_header.size_in_memory(),
                 program_header.alignment(),
                 program_header.is_readable(),
                 program_header.is_writable(),
                 String::format("elf-alloc-%s%s", program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "")
             );
-            memcpy(program_header.laddr().as_ptr(), program_header.raw_data(), program_header.size_in_image());
+            memcpy(program_header.vaddr().as_ptr(), program_header.raw_data(), program_header.size_in_image());
         } else {
             map_section_hook(
-                program_header.laddr(),
+                program_header.vaddr(),
                 program_header.size_in_memory(),
                 program_header.alignment(),
                 program_header.offset(),

@@ -5,7 +5,7 @@
 #include <AK/OwnPtr.h>
 #include <AK/Vector.h>
 #if defined(KERNEL)
-#include <Kernel/LinearAddress.h>
+#include <Kernel/VirtualAddress.h>
 #endif
 #include <AK/ELF/ELFImage.h>

@@ -16,9 +16,9 @@ public:

     bool load();
 #if defined(KERNEL)
-    Function<void*(LinearAddress, size_t, size_t, bool, bool, const String&)> alloc_section_hook;
-    Function<void*(LinearAddress, size_t, size_t, size_t, bool r, bool w, bool x, const String&)> map_section_hook;
-    LinearAddress entry() const { return m_image.entry(); }
+    Function<void*(VirtualAddress, size_t, size_t, bool, bool, const String&)> alloc_section_hook;
+    Function<void*(VirtualAddress, size_t, size_t, size_t, bool r, bool w, bool x, const String&)> map_section_hook;
+    VirtualAddress entry() const { return m_image.entry(); }
 #endif
     char* symbol_ptr(const char* name);

@@ -84,21 +84,21 @@ dword BXVGADevice::find_framebuffer_address()
     return framebuffer_address;
 }

-KResultOr<Region*> BXVGADevice::mmap(Process& process, FileDescription&, LinearAddress preferred_laddr, size_t offset, size_t size, int prot)
+KResultOr<Region*> BXVGADevice::mmap(Process& process, FileDescription&, VirtualAddress preferred_vaddr, size_t offset, size_t size, int prot)
 {
     ASSERT(offset == 0);
     ASSERT(size == framebuffer_size_in_bytes());
     auto vmo = VMObject::create_for_physical_range(framebuffer_address(), framebuffer_size_in_bytes());
     auto* region = process.allocate_region_with_vmo(
-        preferred_laddr,
+        preferred_vaddr,
         framebuffer_size_in_bytes(),
         move(vmo),
         0,
         "BXVGA Framebuffer",
         prot);
-    kprintf("BXVGA: %s(%u) created Region{%p} with size %u for framebuffer P%x with laddr L%x\n",
+    kprintf("BXVGA: %s(%u) created Region{%p} with size %u for framebuffer P%x with vaddr L%x\n",
         process.name().characters(), process.pid(),
-        region, region->size(), framebuffer_address().as_ptr(), region->laddr().get());
+        region, region->size(), framebuffer_address().as_ptr(), region->vaddr().get());
     ASSERT(region);
     return region;
 }

@@ -18,7 +18,7 @@ public:
     void set_y_offset(int);

     virtual int ioctl(FileDescription&, unsigned request, unsigned arg) override;
-    virtual KResultOr<Region*> mmap(Process&, FileDescription&, LinearAddress preferred_laddr, size_t offset, size_t, int prot) override;
+    virtual KResultOr<Region*> mmap(Process&, FileDescription&, VirtualAddress preferred_vaddr, size_t offset, size_t, int prot) override;

     size_t framebuffer_size_in_bytes() const { return m_framebuffer_size.area() * sizeof(dword) * 2; }
     Size framebuffer_size() const { return m_framebuffer_size; }

@@ -1,7 +1,7 @@
 #pragma once

 #include <Kernel/Devices/Device.h>
-#include <Kernel/LinearAddress.h>
+#include <Kernel/VirtualAddress.h>

 class BlockDevice : public Device {
 public:

@@ -24,7 +24,7 @@ int File::ioctl(FileDescription&, unsigned, unsigned)
     return -ENOTTY;
 }

-KResultOr<Region*> File::mmap(Process&, FileDescription&, LinearAddress, size_t, size_t, int)
+KResultOr<Region*> File::mmap(Process&, FileDescription&, VirtualAddress, size_t, size_t, int)
 {
     return KResult(-ENODEV);
 }

@@ -5,7 +5,7 @@
 #include <AK/Retained.h>
 #include <AK/Types.h>
 #include <Kernel/KResult.h>
-#include <Kernel/LinearAddress.h>
+#include <Kernel/VirtualAddress.h>
 #include <Kernel/UnixTypes.h>

 class FileDescription;
@@ -52,7 +52,7 @@ public:
     virtual ssize_t read(FileDescription&, byte*, ssize_t) = 0;
     virtual ssize_t write(FileDescription&, const byte*, ssize_t) = 0;
     virtual int ioctl(FileDescription&, unsigned request, unsigned arg);
-    virtual KResultOr<Region*> mmap(Process&, FileDescription&, LinearAddress preferred_laddr, size_t offset, size_t size, int prot);
+    virtual KResultOr<Region*> mmap(Process&, FileDescription&, VirtualAddress preferred_vaddr, size_t offset, size_t size, int prot);

     virtual String absolute_path(const FileDescription&) const = 0;

@@ -258,9 +258,9 @@ InodeMetadata FileDescription::metadata() const
     return {};
 }

-KResultOr<Region*> FileDescription::mmap(Process& process, LinearAddress laddr, size_t offset, size_t size, int prot)
+KResultOr<Region*> FileDescription::mmap(Process& process, VirtualAddress vaddr, size_t offset, size_t size, int prot)
 {
-    return m_file->mmap(process, *this, laddr, offset, size, prot);
+    return m_file->mmap(process, *this, vaddr, offset, size, prot);
 }

 KResult FileDescription::truncate(off_t length)

@@ -8,7 +8,7 @@
 #include <Kernel/FileSystem/Inode.h>
 #include <Kernel/FileSystem/InodeMetadata.h>
 #include <Kernel/FileSystem/VirtualFileSystem.h>
-#include <Kernel/LinearAddress.h>
+#include <Kernel/VirtualAddress.h>
 #include <Kernel/Net/Socket.h>

 class File;
@@ -67,7 +67,7 @@ public:
     Custody* custody() { return m_custody.ptr(); }
     const Custody* custody() const { return m_custody.ptr(); }

-    KResultOr<Region*> mmap(Process&, LinearAddress, size_t offset, size_t, int prot);
+    KResultOr<Region*> mmap(Process&, VirtualAddress, size_t offset, size_t, int prot);

     bool is_blocking() const { return m_is_blocking; }
     void set_blocking(bool b) { m_is_blocking = b; }

@@ -23,12 +23,12 @@ ssize_t InodeFile::write(FileDescription& descriptor, const byte* data, ssize_t
     return m_inode->write_bytes(descriptor.offset(), count, data, &descriptor);
 }

-KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& descriptor, LinearAddress preferred_laddr, size_t offset, size_t size, int prot)
+KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& descriptor, VirtualAddress preferred_vaddr, size_t offset, size_t size, int prot)
 {
     ASSERT(offset == 0);
     // FIXME: If PROT_EXEC, check that the underlying file system isn't mounted noexec.
     InterruptDisabler disabler;
-    auto* region = process.allocate_file_backed_region(preferred_laddr, size, inode(), descriptor.absolute_path(), prot);
+    auto* region = process.allocate_file_backed_region(preferred_vaddr, size, inode(), descriptor.absolute_path(), prot);
     if (!region)
         return KResult(-ENOMEM);
     return region;

@@ -21,7 +21,7 @@ public:

     virtual ssize_t read(FileDescription&, byte*, ssize_t) override;
     virtual ssize_t write(FileDescription&, const byte*, ssize_t) override;
-    virtual KResultOr<Region*> mmap(Process&, FileDescription&, LinearAddress preferred_laddr, size_t offset, size_t size, int prot) override;
+    virtual KResultOr<Region*> mmap(Process&, FileDescription&, VirtualAddress preferred_vaddr, size_t offset, size_t size, int prot) override;

     virtual String absolute_path(const FileDescription&) const override;

@@ -227,8 +227,8 @@ ByteBuffer procfs$pid_vm(InodeIdentifier identifier)
         if (region->is_writable())
             flags_builder.append('W');
         builder.appendf("%x -- %x    %x  %x   % 4s   %s\n",
-            region->laddr().get(),
-            region->laddr().offset(region->size() - 1).get(),
+            region->vaddr().get(),
+            region->vaddr().offset(region->size() - 1).get(),
             region->size(),
             region->amount_resident(),
             flags_builder.to_string().characters(),
@@ -263,8 +263,8 @@ ByteBuffer procfs$pid_vmo(InodeIdentifier identifier)
     builder.appendf("BEGIN       END         SIZE        NAME\n");
     for (auto& region : process.regions()) {
         builder.appendf("%x -- %x    %x    %s\n",
-            region->laddr().get(),
-            region->laddr().offset(region->size() - 1).get(),
+            region->vaddr().get(),
+            region->vaddr().offset(region->size() - 1).get(),
             region->size(),
             region->name().characters());
         builder.appendf("VMO: %s \"%s\" @ %x(%u)\n",
@@ -300,7 +300,7 @@ ByteBuffer procfs$pid_stack(InodeIdentifier identifier)
         builder.appendf("Thread %d:\n", thread.tid());
         Vector<RecognizedSymbol, 64> recognized_symbols;
         recognized_symbols.append({ thread.tss().eip, ksymbolicate(thread.tss().eip) });
-        for (dword* stack_ptr = (dword*)thread.frame_ptr(); process.validate_read_from_kernel(LinearAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
+        for (dword* stack_ptr = (dword*)thread.frame_ptr(); process.validate_read_from_kernel(VirtualAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
             dword retaddr = stack_ptr[1];
             recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
         }

@@ -94,12 +94,12 @@ static void load_ksyms_from_data(const ByteBuffer& buffer)
     RecognizedSymbol recognized_symbols[max_recognized_symbol_count];
     int recognized_symbol_count = 0;
     if (use_ksyms) {
-        for (dword* stack_ptr = (dword*)ebp; current->process().validate_read_from_kernel(LinearAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
+        for (dword* stack_ptr = (dword*)ebp; current->process().validate_read_from_kernel(VirtualAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
             dword retaddr = stack_ptr[1];
             recognized_symbols[recognized_symbol_count++] = { retaddr, ksymbolicate(retaddr) };
         }
     } else {
-        for (dword* stack_ptr = (dword*)ebp; current->process().validate_read_from_kernel(LinearAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
+        for (dword* stack_ptr = (dword*)ebp; current->process().validate_read_from_kernel(VirtualAddress((dword)stack_ptr)); stack_ptr = (dword*)*stack_ptr) {
             dword retaddr = stack_ptr[1];
             dbgprintf("%x (next: %x)\n", retaddr, stack_ptr ? (dword*)*stack_ptr : 0);
         }

@@ -1,39 +0,0 @@
-#pragma once
-
-#include <AK/Types.h>
-
-class LinearAddress {
-public:
-    LinearAddress() {}
-    explicit LinearAddress(dword address)
-        : m_address(address)
-    {
-    }
-
-    bool is_null() const { return m_address == 0; }
-
-    LinearAddress offset(dword o) const { return LinearAddress(m_address + o); }
-    dword get() const { return m_address; }
-    void set(dword address) { m_address = address; }
-    void mask(dword m) { m_address &= m; }
-
-    bool operator<=(const LinearAddress& other) const { return m_address <= other.m_address; }
-    bool operator>=(const LinearAddress& other) const { return m_address >= other.m_address; }
-    bool operator>(const LinearAddress& other) const { return m_address > other.m_address; }
-    bool operator<(const LinearAddress& other) const { return m_address < other.m_address; }
-    bool operator==(const LinearAddress& other) const { return m_address == other.m_address; }
-    bool operator!=(const LinearAddress& other) const { return m_address != other.m_address; }
-
-    byte* as_ptr() { return reinterpret_cast<byte*>(m_address); }
-    const byte* as_ptr() const { return reinterpret_cast<const byte*>(m_address); }
-
-    dword page_base() const { return m_address & 0xfffff000; }
-
-private:
-    dword m_address { 0 };
-};
-
-inline LinearAddress operator-(const LinearAddress& a, const LinearAddress& b)
-{
-    return LinearAddress(a.get() - b.get());
-}
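
The header that replaces the deleted file, Kernel/VirtualAddress.h, is not shown in this excerpt. Assuming the rename is purely mechanical, as every call-site change in this commit suggests, the new header presumably mirrors the deleted class above with only the type name changed; a sketch:

// Sketch of Kernel/VirtualAddress.h after the rename (the new file is not part of this excerpt);
// assumes a 1:1 rename of the deleted LinearAddress class above.
#pragma once

#include <AK/Types.h>

class VirtualAddress {
public:
    VirtualAddress() {}
    explicit VirtualAddress(dword address)
        : m_address(address)
    {
    }

    bool is_null() const { return m_address == 0; }

    VirtualAddress offset(dword o) const { return VirtualAddress(m_address + o); }
    dword get() const { return m_address; }
    void set(dword address) { m_address = address; }
    void mask(dword m) { m_address &= m; }

    bool operator<=(const VirtualAddress& other) const { return m_address <= other.m_address; }
    bool operator>=(const VirtualAddress& other) const { return m_address >= other.m_address; }
    bool operator>(const VirtualAddress& other) const { return m_address > other.m_address; }
    bool operator<(const VirtualAddress& other) const { return m_address < other.m_address; }
    bool operator==(const VirtualAddress& other) const { return m_address == other.m_address; }
    bool operator!=(const VirtualAddress& other) const { return m_address != other.m_address; }

    byte* as_ptr() { return reinterpret_cast<byte*>(m_address); }
    const byte* as_ptr() const { return reinterpret_cast<const byte*>(m_address); }

    dword page_base() const { return m_address & 0xfffff000; }

private:
    dword m_address { 0 };
};

inline VirtualAddress operator-(const VirtualAddress& a, const VirtualAddress& b)
{
    return VirtualAddress(a.get() - b.get());
}
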
@@ -112,11 +112,11 @@ E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address pci_address, byte irq)
     enable_bus_mastering(m_pci_address);

     m_mmio_base = PhysicalAddress(PCI::get_BAR0(m_pci_address));
-    MM.map_for_kernel(LinearAddress(m_mmio_base.get()), m_mmio_base);
-    MM.map_for_kernel(LinearAddress(m_mmio_base.offset(4096).get()), m_mmio_base.offset(4096));
-    MM.map_for_kernel(LinearAddress(m_mmio_base.offset(8192).get()), m_mmio_base.offset(8192));
-    MM.map_for_kernel(LinearAddress(m_mmio_base.offset(12288).get()), m_mmio_base.offset(12288));
-    MM.map_for_kernel(LinearAddress(m_mmio_base.offset(16384).get()), m_mmio_base.offset(16384));
+    MM.map_for_kernel(VirtualAddress(m_mmio_base.get()), m_mmio_base);
+    MM.map_for_kernel(VirtualAddress(m_mmio_base.offset(4096).get()), m_mmio_base.offset(4096));
+    MM.map_for_kernel(VirtualAddress(m_mmio_base.offset(8192).get()), m_mmio_base.offset(8192));
+    MM.map_for_kernel(VirtualAddress(m_mmio_base.offset(12288).get()), m_mmio_base.offset(12288));
+    MM.map_for_kernel(VirtualAddress(m_mmio_base.offset(16384).get()), m_mmio_base.offset(16384));
     m_use_mmio = true;
     m_io_base = PCI::get_BAR1(m_pci_address) & ~1;
     m_interrupt_line = PCI::get_interrupt_line(m_pci_address);

@@ -72,13 +72,13 @@ bool Process::in_group(gid_t gid) const
     return m_gids.contains(gid);
 }

-Range Process::allocate_range(LinearAddress laddr, size_t size)
+Range Process::allocate_range(VirtualAddress vaddr, size_t size)
 {
-    laddr.mask(PAGE_MASK);
+    vaddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
-    if (laddr.is_null())
+    if (vaddr.is_null())
         return page_directory().range_allocator().allocate_anywhere(size);
-    return page_directory().range_allocator().allocate_specific(laddr, size);
+    return page_directory().range_allocator().allocate_specific(vaddr, size);
 }

 static unsigned prot_to_region_access_flags(int prot)
@@ -93,9 +93,9 @@ static unsigned prot_to_region_access_flags(int prot)
     return access;
 }

-Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name, int prot, bool commit)
+Region* Process::allocate_region(VirtualAddress vaddr, size_t size, String&& name, int prot, bool commit)
 {
-    auto range = allocate_range(laddr, size);
+    auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
     m_regions.append(adopt(*new Region(range, move(name), prot_to_region_access_flags(prot))));
@@ -105,9 +105,9 @@ Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name
     return m_regions.last().ptr();
 }

-Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, RetainPtr<Inode>&& inode, String&& name, int prot)
+Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size, RetainPtr<Inode>&& inode, String&& name, int prot)
 {
-    auto range = allocate_range(laddr, size);
+    auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
     m_regions.append(adopt(*new Region(range, move(inode), move(name), prot_to_region_access_flags(prot))));
@@ -115,9 +115,9 @@ Region* Process::allocate_file_backed_region(LinearAddress laddr, size_t size, R
     return m_regions.last().ptr();
 }

-Region* Process::allocate_region_with_vmo(LinearAddress laddr, size_t size, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& name, int prot)
+Region* Process::allocate_region_with_vmo(VirtualAddress vaddr, size_t size, Retained<VMObject>&& vmo, size_t offset_in_vmo, String&& name, int prot)
 {
-    auto range = allocate_range(laddr, size);
+    auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
     offset_in_vmo &= PAGE_MASK;
@@ -131,7 +131,7 @@ bool Process::deallocate_region(Region& region)
     InterruptDisabler disabler;
     for (int i = 0; i < m_regions.size(); ++i) {
         if (m_regions[i] == &region) {
-            page_directory().range_allocator().deallocate({ region.laddr(), region.size() });
+            page_directory().range_allocator().deallocate({ region.vaddr(), region.size() });
             MM.unmap_region(region);
             m_regions.remove(i);
             return true;
@@ -140,11 +140,11 @@ bool Process::deallocate_region(Region& region)
     return false;
 }

-Region* Process::region_from_range(LinearAddress laddr, size_t size)
+Region* Process::region_from_range(VirtualAddress vaddr, size_t size)
 {
     size = PAGE_ROUND_UP(size);
     for (auto& region : m_regions) {
-        if (region->laddr() == laddr && region->size() == size)
+        if (region->vaddr() == vaddr && region->size() == size)
             return region.ptr();
     }
     return nullptr;
@@ -154,7 +154,7 @@ int Process::sys$set_mmap_name(void* addr, size_t size, const char* name)
 {
     if (!validate_read_str(name))
         return -EFAULT;
-    auto* region = region_from_range(LinearAddress((dword)addr), size);
+    auto* region = region_from_range(VirtualAddress((dword)addr), size);
     if (!region)
         return -EINVAL;
     region->set_name(String(name));
@@ -179,21 +179,21 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* params)
     if ((dword)addr & ~PAGE_MASK)
         return (void*)-EINVAL;
     if (flags & MAP_ANONYMOUS) {
-        auto* region = allocate_region(LinearAddress((dword)addr), size, "mmap", prot, false);
+        auto* region = allocate_region(VirtualAddress((dword)addr), size, "mmap", prot, false);
         if (!region)
             return (void*)-ENOMEM;
         if (flags & MAP_SHARED)
             region->set_shared(true);
         if (name)
             region->set_name(name);
-        return region->laddr().as_ptr();
+        return region->vaddr().as_ptr();
     }
     if (offset & ~PAGE_MASK)
         return (void*)-EINVAL;
     auto* descriptor = file_description(fd);
     if (!descriptor)
         return (void*)-EBADF;
-    auto region_or_error = descriptor->mmap(*this, LinearAddress((dword)addr), offset, size, prot);
+    auto region_or_error = descriptor->mmap(*this, VirtualAddress((dword)addr), offset, size, prot);
     if (region_or_error.is_error())
         return (void*)(int)region_or_error.error();
     auto region = region_or_error.value();
@@ -201,12 +201,12 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* params)
         region->set_shared(true);
     if (name)
         region->set_name(name);
-    return region->laddr().as_ptr();
+    return region->vaddr().as_ptr();
 }

 int Process::sys$munmap(void* addr, size_t size)
 {
-    auto* region = region_from_range(LinearAddress((dword)addr), size);
+    auto* region = region_from_range(VirtualAddress((dword)addr), size);
     if (!region)
         return -EINVAL;
     if (!deallocate_region(*region))
@@ -239,7 +239,7 @@ Process* Process::fork(RegisterDump& regs)

     for (auto& region : m_regions) {
 #ifdef FORK_DEBUG
-        dbgprintf("fork: cloning Region{%p} \"%s\" L%x\n", region.ptr(), region->name().characters(), region->laddr().get());
+        dbgprintf("fork: cloning Region{%p} \"%s\" L%x\n", region.ptr(), region->name().characters(), region->vaddr().get());
 #endif
         auto cloned_region = region->clone();
         child->m_regions.append(move(cloned_region));
@@ -334,7 +334,7 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir

     auto vmo = VMObject::create_file_backed(descriptor->inode());
     vmo->set_name(descriptor->absolute_path());
-    RetainPtr<Region> region = allocate_region_with_vmo(LinearAddress(), metadata.size, vmo.copy_ref(), 0, vmo->name(), PROT_READ);
+    RetainPtr<Region> region = allocate_region_with_vmo(VirtualAddress(), metadata.size, vmo.copy_ref(), 0, vmo->name(), PROT_READ);
     ASSERT(region);

     if (this != &current->process()) {
@@ -347,8 +347,8 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir
         // Okay, here comes the sleight of hand, pay close attention..
         auto old_regions = move(m_regions);
         m_regions.append(*region);
-        loader = make<ELFLoader>(region->laddr().as_ptr());
-        loader->map_section_hook = [&](LinearAddress laddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable, bool is_executable, const String& name) {
+        loader = make<ELFLoader>(region->vaddr().as_ptr());
+        loader->map_section_hook = [&](VirtualAddress vaddr, size_t size, size_t alignment, size_t offset_in_image, bool is_readable, bool is_writable, bool is_executable, const String& name) {
             ASSERT(size);
             ASSERT(alignment == PAGE_SIZE);
             int prot = 0;
@@ -358,10 +358,10 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir
                 prot |= PROT_WRITE;
             if (is_executable)
                 prot |= PROT_EXEC;
-            (void)allocate_region_with_vmo(laddr, size, vmo.copy_ref(), offset_in_image, String(name), prot);
-            return laddr.as_ptr();
+            (void)allocate_region_with_vmo(vaddr, size, vmo.copy_ref(), offset_in_image, String(name), prot);
+            return vaddr.as_ptr();
         };
-        loader->alloc_section_hook = [&](LinearAddress laddr, size_t size, size_t alignment, bool is_readable, bool is_writable, const String& name) {
+        loader->alloc_section_hook = [&](VirtualAddress vaddr, size_t size, size_t alignment, bool is_readable, bool is_writable, const String& name) {
             ASSERT(size);
             ASSERT(alignment == PAGE_SIZE);
             int prot = 0;
@@ -369,8 +369,8 @@ int Process::do_exec(String path, Vector<String> arguments, Vector<String> envir
                 prot |= PROT_READ;
             if (is_writable)
                 prot |= PROT_WRITE;
-            (void)allocate_region(laddr, size, String(name), prot);
-            return laddr.as_ptr();
+            (void)allocate_region(vaddr, size, String(name), prot);
+            return vaddr.as_ptr();
         };
         bool success = loader->load();
         if (!success || !loader->entry().get()) {
@@ -649,8 +649,8 @@ void Process::dump_regions()
     kprintf("BEGIN       END         SIZE        NAME\n");
     for (auto& region : m_regions) {
         kprintf("%x -- %x    %x    %s\n",
-            region->laddr().get(),
-            region->laddr().offset(region->size() - 1).get(),
+            region->vaddr().get(),
+            region->vaddr().offset(region->size() - 1).get(),
             region->size(),
             region->name().characters());
     }
@@ -677,8 +677,8 @@ void Process::create_signal_trampolines_if_needed()
         return;
     // FIXME: This should be a global trampoline shared by all processes, not one created per process!
     // FIXME: Remap as read-only after setup.
-    auto* region = allocate_region(LinearAddress(), PAGE_SIZE, "Signal trampolines", PROT_READ | PROT_WRITE | PROT_EXEC);
-    m_return_to_ring3_from_signal_trampoline = region->laddr();
+    auto* region = allocate_region(VirtualAddress(), PAGE_SIZE, "Signal trampolines", PROT_READ | PROT_WRITE | PROT_EXEC);
+    m_return_to_ring3_from_signal_trampoline = region->vaddr();
     byte* code_ptr = m_return_to_ring3_from_signal_trampoline.as_ptr();
     *code_ptr++ = 0x58; // pop eax (Argument to signal handler (ignored here))
     *code_ptr++ = 0x5a; // pop edx (Original signal mask to restore)
@@ -698,7 +698,7 @@ void Process::create_signal_trampolines_if_needed()
     *code_ptr++ = 0x0f; // ud2
     *code_ptr++ = 0x0b;

-    m_return_to_ring0_from_signal_trampoline = LinearAddress((dword)code_ptr);
+    m_return_to_ring0_from_signal_trampoline = VirtualAddress((dword)code_ptr);
     *code_ptr++ = 0x58; // pop eax (Argument to signal handler (ignored here))
     *code_ptr++ = 0x5a; // pop edx (Original signal mask to restore)
     *code_ptr++ = 0xb8; // mov eax, <dword>
@@ -1448,7 +1448,7 @@ enum class KernelMemoryCheckResult
     AccessDenied
 };

-static KernelMemoryCheckResult check_kernel_memory_access(LinearAddress laddr, bool is_write)
+static KernelMemoryCheckResult check_kernel_memory_access(VirtualAddress vaddr, bool is_write)
 {
     auto& sections = multiboot_info_ptr->u.elf_sec;

@@ -1457,7 +1457,7 @@ static KernelMemoryCheckResult check_kernel_memory_access(LinearAddress laddr, b
         auto& segment = kernel_program_headers[i];
         if (segment.p_type != PT_LOAD || !segment.p_vaddr || !segment.p_memsz)
             continue;
-        if (laddr.get() < segment.p_vaddr || laddr.get() > (segment.p_vaddr + segment.p_memsz))
+        if (vaddr.get() < segment.p_vaddr || vaddr.get() > (segment.p_vaddr + segment.p_memsz))
             continue;
         if (is_write && !(kernel_program_headers[i].p_flags & PF_W))
             return KernelMemoryCheckResult::AccessDenied;
@@ -1468,20 +1468,20 @@ static KernelMemoryCheckResult check_kernel_memory_access(LinearAddress laddr, b
     return KernelMemoryCheckResult::NotInsideKernelMemory;
 }

-bool Process::validate_read_from_kernel(LinearAddress laddr) const
+bool Process::validate_read_from_kernel(VirtualAddress vaddr) const
 {
-    if (laddr.is_null())
+    if (vaddr.is_null())
         return false;
     // We check extra carefully here since the first 4MB of the address space is identity-mapped.
     // This code allows access outside of the known used address ranges to get caught.
-    auto kmc_result = check_kernel_memory_access(laddr, false);
+    auto kmc_result = check_kernel_memory_access(vaddr, false);
     if (kmc_result == KernelMemoryCheckResult::AccessGranted)
         return true;
     if (kmc_result == KernelMemoryCheckResult::AccessDenied)
         return false;
-    if (is_kmalloc_address(laddr.as_ptr()))
+    if (is_kmalloc_address(vaddr.as_ptr()))
         return true;
-    return validate_read(laddr.as_ptr(), 1);
+    return validate_read(vaddr.as_ptr(), 1);
 }

 bool Process::validate_read_str(const char* str)
@@ -1494,8 +1494,8 @@ bool Process::validate_read_str(const char* str)
 bool Process::validate_read(const void* address, ssize_t size) const
 {
     ASSERT(size >= 0);
-    LinearAddress first_address((dword)address);
-    LinearAddress last_address = first_address.offset(size - 1);
+    VirtualAddress first_address((dword)address);
+    VirtualAddress last_address = first_address.offset(size - 1);
     if (is_ring0()) {
         auto kmc_result = check_kernel_memory_access(first_address, false);
         if (kmc_result == KernelMemoryCheckResult::AccessGranted)
@@ -1518,8 +1518,8 @@ bool Process::validate_read(const void* address, ssize_t size) const
 bool Process::validate_write(void* address, ssize_t size) const
 {
     ASSERT(size >= 0);
-    LinearAddress first_address((dword)address);
-    LinearAddress last_address = first_address.offset(size - 1);
+    VirtualAddress first_address((dword)address);
+    VirtualAddress last_address = first_address.offset(size - 1);
     if (is_ring0()) {
         if (is_kmalloc_address(address))
             return true;
@@ -1698,7 +1698,7 @@ int Process::sys$sigaction(int signum, const sigaction* act, sigaction* old_act)
         old_act->sa_sigaction = (decltype(old_act->sa_sigaction))action.handler_or_sigaction.get();
     }
     action.flags = act->sa_flags;
-    action.handler_or_sigaction = LinearAddress((dword)act->sa_sigaction);
+    action.handler_or_sigaction = VirtualAddress((dword)act->sa_sigaction);
     return 0;
 }

@@ -2363,17 +2363,17 @@ struct SharedBuffer {
         if (m_pid1 == process.pid()) {
             ++m_pid1_retain_count;
             if (!m_pid1_region) {
-                m_pid1_region = process.allocate_region_with_vmo(LinearAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | (m_pid1_writable ? PROT_WRITE : 0));
+                m_pid1_region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | (m_pid1_writable ? PROT_WRITE : 0));
                 m_pid1_region->set_shared(true);
             }
-            return m_pid1_region->laddr().as_ptr();
+            return m_pid1_region->vaddr().as_ptr();
         } else if (m_pid2 == process.pid()) {
             ++m_pid2_retain_count;
             if (!m_pid2_region) {
-                m_pid2_region = process.allocate_region_with_vmo(LinearAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | (m_pid2_writable ? PROT_WRITE : 0));
+                m_pid2_region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | (m_pid2_writable ? PROT_WRITE : 0));
                 m_pid2_region->set_shared(true);
             }
-            return m_pid2_region->laddr().as_ptr();
+            return m_pid2_region->vaddr().as_ptr();
         }
         return nullptr;
     }
@@ -2499,9 +2499,9 @@ int Process::sys$create_shared_buffer(pid_t peer_pid, int size, void** buffer)
     auto shared_buffer = make<SharedBuffer>(m_pid, peer_pid, size);
     shared_buffer->m_shared_buffer_id = shared_buffer_id;
     ASSERT(shared_buffer->size() >= size);
-    shared_buffer->m_pid1_region = allocate_region_with_vmo(LinearAddress(), shared_buffer->size(), shared_buffer->m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | PROT_WRITE);
+    shared_buffer->m_pid1_region = allocate_region_with_vmo(VirtualAddress(), shared_buffer->size(), shared_buffer->m_vmo.copy_ref(), 0, "SharedBuffer", PROT_READ | PROT_WRITE);
     shared_buffer->m_pid1_region->set_shared(true);
-    *buffer = shared_buffer->m_pid1_region->laddr().as_ptr();
+    *buffer = shared_buffer->m_pid1_region->vaddr().as_ptr();
 #ifdef SHARED_BUFFER_DEBUG
     kprintf("%s(%u): Created shared buffer %d (%u bytes, vmo is %u) for sharing with %d\n", name().characters(), pid(), shared_buffer_id, size, shared_buffer->size(), peer_pid);
 #endif

@@ -225,7 +225,7 @@ public:
     dword m_ticks_in_user_for_dead_children { 0 };
     dword m_ticks_in_kernel_for_dead_children { 0 };

-    bool validate_read_from_kernel(LinearAddress) const;
+    bool validate_read_from_kernel(VirtualAddress) const;

     bool validate_read(const void*, ssize_t) const;
     bool validate_write(void*, ssize_t) const;
@@ -250,9 +250,9 @@ public:

     bool is_superuser() const { return m_euid == 0; }

-    Region* allocate_region_with_vmo(LinearAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&& name, int prot);
-    Region* allocate_file_backed_region(LinearAddress, size_t, RetainPtr<Inode>&&, String&& name, int prot);
-    Region* allocate_region(LinearAddress, size_t, String&& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
+    Region* allocate_region_with_vmo(VirtualAddress, size_t, Retained<VMObject>&&, size_t offset_in_vmo, String&& name, int prot);
+    Region* allocate_file_backed_region(VirtualAddress, size_t, RetainPtr<Inode>&&, String&& name, int prot);
+    Region* allocate_region(VirtualAddress, size_t, String&& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
     bool deallocate_region(Region& region);

     void set_being_inspected(bool b) { m_being_inspected = b; }
@@ -277,7 +277,7 @@ private:

     Process(String&& name, uid_t, gid_t, pid_t ppid, RingLevel, RetainPtr<Custody>&& cwd = nullptr, RetainPtr<Custody>&& executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);

-    Range allocate_range(LinearAddress, size_t);
+    Range allocate_range(VirtualAddress, size_t);

     int do_exec(String path, Vector<String> arguments, Vector<String> environment);
     ssize_t do_write(FileDescription&, const byte*, int data_size);
@@ -326,12 +326,12 @@ private:

     TTY* m_tty { nullptr };

-    Region* region_from_range(LinearAddress, size_t);
+    Region* region_from_range(VirtualAddress, size_t);

     Vector<Retained<Region>> m_regions;

-    LinearAddress m_return_to_ring3_from_signal_trampoline;
-    LinearAddress m_return_to_ring0_from_signal_trampoline;
+    VirtualAddress m_return_to_ring3_from_signal_trampoline;
+    VirtualAddress m_return_to_ring0_from_signal_trampoline;

     pid_t m_ppid { 0 };
     mode_t m_umask { 022 };

@@ -89,9 +89,9 @@ int SharedMemory::write(FileDescription&, const byte* data, int data_size)
     ASSERT_NOT_REACHED();
 }

-KResultOr<Region*> SharedMemory::mmap(Process& process, FileDescription&, LinearAddress laddr, size_t offset, size_t size, int prot)
+KResultOr<Region*> SharedMemory::mmap(Process& process, FileDescription&, VirtualAddress vaddr, size_t offset, size_t size, int prot)
 {
     if (!vmo())
         return KResult(-ENODEV);
-    return process.allocate_region_with_vmo(laddr, size, *vmo(), offset, name(), prot);
+    return process.allocate_region_with_vmo(vaddr, size, *vmo(), offset, name(), prot);
 }

@@ -31,7 +31,7 @@ private:
     virtual String absolute_path(const FileDescription&) const override;
     virtual const char* class_name() const override { return "SharedMemory"; }
     virtual bool is_shared_memory() const override { return true; }
-    virtual KResultOr<Region*> mmap(Process&, FileDescription&, LinearAddress, size_t offset, size_t size, int prot) override;
+    virtual KResultOr<Region*> mmap(Process&, FileDescription&, VirtualAddress, size_t offset, size_t size, int prot) override;

     SharedMemory(const String& name, uid_t, gid_t, mode_t);

@@ -63,9 +63,9 @@ Thread::Thread(Process& process)
     } else {
         // Ring3 processes need a separate stack for Ring0.
         m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid));
-        m_kernel_stack_base = m_kernel_stack_region->laddr().get();
+        m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_tss.ss0 = 0x10;
-        m_tss.esp0 = m_kernel_stack_region->laddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
+        m_tss.esp0 = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
     }

     // HACK: Ring2 SS in the TSS is the current PID.
@@ -332,8 +332,8 @@ ShouldUnblockThread Thread::dispatch_signal(byte signal)
     if (signal == SIGCONT && state() == Stopped)
         set_state(Runnable);

-    auto handler_laddr = action.handler_or_sigaction;
-    if (handler_laddr.is_null()) {
+    auto handler_vaddr = action.handler_or_sigaction;
+    if (handler_vaddr.is_null()) {
         switch (default_signal_action(signal)) {
         case DefaultSignalAction::Stop:
             set_state(Stopped);
@@ -352,7 +352,7 @@ ShouldUnblockThread Thread::dispatch_signal(byte signal)
         ASSERT_NOT_REACHED();
     }

-    if (handler_laddr.as_ptr() == SIG_IGN) {
+    if (handler_vaddr.as_ptr() == SIG_IGN) {
 #ifdef SIGNAL_DEBUG
         kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
 #endif
@@ -389,15 +389,15 @@ ShouldUnblockThread Thread::dispatch_signal(byte signal)
 #endif

         if (!m_signal_stack_user_region) {
-            m_signal_stack_user_region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("User Signal Stack (Thread %d)", m_tid));
+            m_signal_stack_user_region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, String::format("User Signal Stack (Thread %d)", m_tid));
             ASSERT(m_signal_stack_user_region);
         }
         if (!m_kernel_stack_for_signal_handler_region)
             m_kernel_stack_for_signal_handler_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Signal Stack (Thread %d)", m_tid));
         m_tss.ss = 0x23;
-        m_tss.esp = m_signal_stack_user_region->laddr().offset(default_userspace_stack_size).get();
+        m_tss.esp = m_signal_stack_user_region->vaddr().offset(default_userspace_stack_size).get();
         m_tss.ss0 = 0x10;
-        m_tss.esp0 = m_kernel_stack_for_signal_handler_region->laddr().offset(default_kernel_stack_size).get();
+        m_tss.esp0 = m_kernel_stack_for_signal_handler_region->vaddr().offset(default_kernel_stack_size).get();

         push_value_on_stack(0);
     } else {
@@ -427,7 +427,7 @@ ShouldUnblockThread Thread::dispatch_signal(byte signal)
     m_tss.es = 0x23;
     m_tss.fs = 0x23;
     m_tss.gs = 0x23;
-    m_tss.eip = handler_laddr.get();
+    m_tss.eip = handler_vaddr.get();

     // FIXME: Should we worry about the stack being 16 byte aligned when entering a signal handler?
     push_value_on_stack(signal);
@@ -452,8 +452,8 @@ void Thread::set_default_signal_dispositions()
 {
     // FIXME: Set up all the right default actions. See signal(7).
     memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
-    m_signal_action_data[SIGCHLD].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
-    m_signal_action_data[SIGWINCH].handler_or_sigaction = LinearAddress((dword)SIG_IGN);
+    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((dword)SIG_IGN);
+    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((dword)SIG_IGN);
 }

 void Thread::push_value_on_stack(dword value)
@@ -465,11 +465,11 @@ void Thread::push_value_on_stack(dword value)

 void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
 {
-    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, "Stack (Main thread)");
+    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)");
     ASSERT(region);
-    m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
+    m_tss.esp = region->vaddr().offset(default_userspace_stack_size).get();

-    char* stack_base = (char*)region->laddr().get();
+    char* stack_base = (char*)region->vaddr().get();
     int argc = arguments.size();
     char** argv = (char**)stack_base;
     char** env = argv + arguments.size() + 1;
@@ -511,9 +511,9 @@ void Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vect

 void Thread::make_userspace_stack_for_secondary_thread(void* argument)
 {
-    auto* region = m_process.allocate_region(LinearAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()));
+    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, String::format("Stack (Thread %d)", tid()));
     ASSERT(region);
-    m_tss.esp = region->laddr().offset(default_userspace_stack_size).get();
+    m_tss.esp = region->vaddr().offset(default_userspace_stack_size).get();

     // NOTE: The stack needs to be 16-byte aligned.
     push_value_on_stack((dword)argument);

@@ -6,7 +6,7 @@
 #include <AK/RetainPtr.h>
 #include <AK/Vector.h>
 #include <Kernel/KResult.h>
-#include <Kernel/LinearAddress.h>
+#include <Kernel/VirtualAddress.h>
 #include <Kernel/UnixTypes.h>
 #include <Kernel/VM/Region.h>
 #include <Kernel/i386.h>
@@ -24,7 +24,7 @@ enum class ShouldUnblockThread
 };

 struct SignalActionData {
-    LinearAddress handler_or_sigaction;
+    VirtualAddress handler_or_sigaction;
     dword mask { 0 };
     int flags { 0 };
 };
@@ -112,7 +112,7 @@ public:
     dword ticks_left() const { return m_ticks_left; }

     dword kernel_stack_base() const { return m_kernel_stack_base; }
-    dword kernel_stack_for_signal_handler_base() const { return m_kernel_stack_for_signal_handler_region ? m_kernel_stack_for_signal_handler_region->laddr().get() : 0; }
+    dword kernel_stack_for_signal_handler_base() const { return m_kernel_stack_for_signal_handler_region ? m_kernel_stack_for_signal_handler_region->vaddr().get() : 0; }

     void set_selector(word s) { m_far_ptr.selector = s; }
     void set_state(State);

@@ -66,14 +66,14 @@ void MemoryManager::initialize_paging()
     dbgprintf("MM: Protect against null dereferences\n");
 #endif
     // Make null dereferences crash.
-    map_protected(LinearAddress(0), PAGE_SIZE);
+    map_protected(VirtualAddress(0), PAGE_SIZE);

 #ifdef MM_DEBUG
     dbgprintf("MM: Identity map bottom 4MB\n");
 #endif
     // The bottom 4 MB (except for the null page) are identity mapped & supervisor only.
     // Every process shares these mappings.
-    create_identity_mapping(kernel_page_directory(), LinearAddress(PAGE_SIZE), (4 * MB) - PAGE_SIZE);
+    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (4 * MB) - PAGE_SIZE);

     // Basic memory map:
     // 0      -> 512 kB         Kernel code. Root page directory & PDE 0.
@@ -90,7 +90,7 @@ void MemoryManager::initialize_paging()
     dbgprintf("MM: 4MB-%uMB available for allocation\n", m_ram_size / 1048576);
     for (size_t i = (4 * MB); i < m_ram_size; i += PAGE_SIZE)
         m_free_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(i), false));
-    m_quickmap_addr = LinearAddress((1 * MB) - PAGE_SIZE);
+    m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
 #ifdef MM_DEBUG
     dbgprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
     dbgprintf("MM: Installing page directory\n");
@@ -118,12 +118,12 @@ RetainPtr<PhysicalPage> MemoryManager::allocate_page_table(PageDirectory& page_d
     return physical_page;
 }

-void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, LinearAddress laddr, size_t size)
+void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, VirtualAddress vaddr, size_t size)
 {
     InterruptDisabler disabler;
-    // FIXME: ASSERT(laddr is 4KB aligned);
+    // FIXME: ASSERT(vaddr is 4KB aligned);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
-        auto pte_address = laddr.offset(offset);
+        auto pte_address = vaddr.offset(offset);
         auto pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(0);
         pte.set_user_allowed(false);
@@ -133,16 +133,16 @@ void MemoryManager::remove_identity_mapping(PageDirectory& page_directory, Linea
     }
 }

-auto MemoryManager::ensure_pte(PageDirectory& page_directory, LinearAddress laddr) -> PageTableEntry
+auto MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr) -> PageTableEntry
 {
     ASSERT_INTERRUPTS_DISABLED();
-    dword page_directory_index = (laddr.get() >> 22) & 0x3ff;
-    dword page_table_index = (laddr.get() >> 12) & 0x3ff;
+    dword page_directory_index = (vaddr.get() >> 22) & 0x3ff;
+    dword page_table_index = (vaddr.get() >> 12) & 0x3ff;

     PageDirectoryEntry pde = PageDirectoryEntry(&page_directory.entries()[page_directory_index]);
     if (!pde.is_present()) {
 #ifdef MM_DEBUG
-        dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, laddr.get());
+        dbgprintf("MM: PDE %u not present (requested for L%x), allocating\n", page_directory_index, vaddr.get());
 #endif
         if (page_directory_index == 0) {
             ASSERT(&page_directory == m_kernel_page_directory);
@@ -159,7 +159,7 @@ auto MemoryManager::ensure_pte(PageDirectory& page_directory, LinearAddress ladd
                 &page_directory == m_kernel_page_directory ? "Kernel" : "User",
                 page_directory.cr3(),
                 page_directory_index,
-                laddr.get(),
+                vaddr.get(),
                 page_table->paddr().get());
 #endif

@@ -173,12 +173,12 @@ auto MemoryManager::ensure_pte(PageDirectory& page_directory, LinearAddress ladd
     return PageTableEntry(&pde.page_table_base()[page_table_index]);
 }

-void MemoryManager::map_protected(LinearAddress laddr, size_t length)
+void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
 {
     InterruptDisabler disabler;
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += PAGE_SIZE) {
-        auto pte_address = laddr.offset(offset);
+        auto pte_address = vaddr.offset(offset);
         auto pte = ensure_pte(kernel_page_directory(), pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
@@ -188,12 +188,12 @@ void MemoryManager::map_protected(LinearAddress laddr, size_t length)
     }
 }

-void MemoryManager::create_identity_mapping(PageDirectory& page_directory, LinearAddress laddr, size_t size)
+void MemoryManager::create_identity_mapping(PageDirectory& page_directory, VirtualAddress vaddr, size_t size)
 {
     InterruptDisabler disabler;
-    ASSERT((laddr.get() & ~PAGE_MASK) == 0);
+    ASSERT((vaddr.get() & ~PAGE_MASK) == 0);
     for (dword offset = 0; offset < size; offset += PAGE_SIZE) {
-        auto pte_address = laddr.offset(offset);
+        auto pte_address = vaddr.offset(offset);
         auto pte = ensure_pte(page_directory, pte_address);
         pte.set_physical_page_base(pte_address.get());
         pte.set_user_allowed(false);
@@ -208,41 +208,41 @@ void MemoryManager::initialize()
     s_the = new MemoryManager;
 }

-Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
+Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();

-    if (laddr.get() >= 0xc0000000) {
+    if (vaddr.get() >= 0xc0000000) {
         for (auto& region : MM.m_kernel_regions) {
-            if (region->contains(laddr))
+            if (region->contains(vaddr))
                 return region;
         }
     }

     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
-        if (region->contains(laddr))
+        if (region->contains(vaddr))
             return region.ptr();
     }
-    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), laddr.get(), process.page_directory().cr3());
+    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), vaddr.get(), process.page_directory().cr3());
     return nullptr;
 }

-const Region* MemoryManager::region_from_laddr(const Process& process, LinearAddress laddr)
+const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
 {
-    if (laddr.get() >= 0xc0000000) {
+    if (vaddr.get() >= 0xc0000000) {
         for (auto& region : MM.m_kernel_regions) {
-            if (region->contains(laddr))
+            if (region->contains(vaddr))
                 return region;
         }
     }

     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
-        if (region->contains(laddr))
+        if (region->contains(vaddr))
             return region.ptr();
     }
-    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), laddr.get(), process.page_directory().cr3());
+    dbgprintf("%s(%u) Couldn't find region for L%x (CR3=%x)\n", process.name().characters(), process.pid(), vaddr.get(), process.page_directory().cr3());
     return nullptr;
 }

|  | @ -290,7 +290,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region) | |||
|     auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]); | ||||
|     auto physical_page = allocate_physical_page(ShouldZeroFill::No); | ||||
|     byte* dest_ptr = quickmap_page(*physical_page); | ||||
|     const byte* src_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); | ||||
|     const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); | ||||
| #ifdef PAGE_FAULT_DEBUG | ||||
|     dbgprintf("      >> COW P%x <- P%x\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get()); | ||||
| #endif | ||||
|  | @ -345,7 +345,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re | |||
|         return false; | ||||
|     } | ||||
|     remap_region_page(region, page_index_in_region, true); | ||||
|     byte* dest_ptr = region.laddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); | ||||
|     byte* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); | ||||
|     memcpy(dest_ptr, page_buffer, PAGE_SIZE); | ||||
|     return true; | ||||
| } | ||||
|  | @ -355,15 +355,15 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault) | |||
|     ASSERT_INTERRUPTS_DISABLED(); | ||||
|     ASSERT(current); | ||||
| #ifdef PAGE_FAULT_DEBUG | ||||
|     dbgprintf("MM: handle_page_fault(%w) at L%x\n", fault.code(), fault.laddr().get()); | ||||
|     dbgprintf("MM: handle_page_fault(%w) at L%x\n", fault.code(), fault.vaddr().get()); | ||||
| #endif | ||||
|     ASSERT(fault.laddr() != m_quickmap_addr); | ||||
|     auto* region = region_from_laddr(current->process(), fault.laddr()); | ||||
|     ASSERT(fault.vaddr() != m_quickmap_addr); | ||||
|     auto* region = region_from_vaddr(current->process(), fault.vaddr()); | ||||
|     if (!region) { | ||||
|         kprintf("NP(error) fault at invalid address L%x\n", fault.laddr().get()); | ||||
|         kprintf("NP(error) fault at invalid address L%x\n", fault.vaddr().get()); | ||||
|         return PageFaultResponse::ShouldCrash; | ||||
|     } | ||||
|     auto page_index_in_region = region->page_index_from_address(fault.laddr()); | ||||
|     auto page_index_in_region = region->page_index_from_address(fault.vaddr()); | ||||
|     if (fault.is_not_present()) { | ||||
|         if (region->vmo().inode()) { | ||||
| #ifdef PAGE_FAULT_DEBUG | ||||
|  | @ -387,7 +387,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault) | |||
|             ASSERT(success); | ||||
|             return PageFaultResponse::Continue; | ||||
|         } | ||||
|         kprintf("PV(error) fault in Region{%p}[%u] at L%x\n", region, page_index_in_region, fault.laddr().get()); | ||||
|         kprintf("PV(error) fault in Region{%p}[%u] at L%x\n", region, page_index_in_region, fault.vaddr().get()); | ||||
|     } else { | ||||
|         ASSERT_NOT_REACHED(); | ||||
|     } | ||||
|  | @ -462,22 +462,22 @@ void MemoryManager::flush_entire_tlb() | |||
|             : "%eax", "memory"); | ||||
| } | ||||
| 
 | ||||
| void MemoryManager::flush_tlb(LinearAddress laddr) | ||||
| void MemoryManager::flush_tlb(VirtualAddress vaddr) | ||||
| { | ||||
|     asm volatile("invlpg %0" | ||||
|                  : | ||||
|                  : "m"(*(char*)laddr.get()) | ||||
|                  : "m"(*(char*)vaddr.get()) | ||||
|                  : "memory"); | ||||
| } | ||||
| 
 | ||||
| void MemoryManager::map_for_kernel(LinearAddress laddr, PhysicalAddress paddr) | ||||
| void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr) | ||||
| { | ||||
|     auto pte = ensure_pte(kernel_page_directory(), laddr); | ||||
|     auto pte = ensure_pte(kernel_page_directory(), vaddr); | ||||
|     pte.set_physical_page_base(paddr.get()); | ||||
|     pte.set_present(true); | ||||
|     pte.set_writable(true); | ||||
|     pte.set_user_allowed(false); | ||||
|     flush_tlb(laddr); | ||||
|     flush_tlb(vaddr); | ||||
| } | ||||
| 
 | ||||
| byte* MemoryManager::quickmap_page(PhysicalPage& physical_page) | ||||
|  | @ -485,35 +485,35 @@ byte* MemoryManager::quickmap_page(PhysicalPage& physical_page) | |||
|     ASSERT_INTERRUPTS_DISABLED(); | ||||
|     ASSERT(!m_quickmap_in_use); | ||||
|     m_quickmap_in_use = true; | ||||
|     auto page_laddr = m_quickmap_addr; | ||||
|     auto pte = ensure_pte(kernel_page_directory(), page_laddr); | ||||
|     auto page_vaddr = m_quickmap_addr; | ||||
|     auto pte = ensure_pte(kernel_page_directory(), page_vaddr); | ||||
|     pte.set_physical_page_base(physical_page.paddr().get()); | ||||
|     pte.set_present(true); | ||||
|     pte.set_writable(true); | ||||
|     pte.set_user_allowed(false); | ||||
|     flush_tlb(page_laddr); | ||||
|     flush_tlb(page_vaddr); | ||||
|     ASSERT((dword)pte.physical_page_base() == physical_page.paddr().get()); | ||||
| #ifdef MM_DEBUG | ||||
|     dbgprintf("MM: >> quickmap_page L%x => P%x @ PTE=%p\n", page_laddr, physical_page.paddr().get(), pte.ptr()); | ||||
|     dbgprintf("MM: >> quickmap_page L%x => P%x @ PTE=%p\n", page_vaddr, physical_page.paddr().get(), pte.ptr()); | ||||
| #endif | ||||
|     return page_laddr.as_ptr(); | ||||
|     return page_vaddr.as_ptr(); | ||||
| } | ||||
| 
 | ||||
| void MemoryManager::unquickmap_page() | ||||
| { | ||||
|     ASSERT_INTERRUPTS_DISABLED(); | ||||
|     ASSERT(m_quickmap_in_use); | ||||
|     auto page_laddr = m_quickmap_addr; | ||||
|     auto pte = ensure_pte(kernel_page_directory(), page_laddr); | ||||
|     auto page_vaddr = m_quickmap_addr; | ||||
|     auto pte = ensure_pte(kernel_page_directory(), page_vaddr); | ||||
| #ifdef MM_DEBUG | ||||
|     auto old_physical_address = pte.physical_page_base(); | ||||
| #endif | ||||
|     pte.set_physical_page_base(0); | ||||
|     pte.set_present(false); | ||||
|     pte.set_writable(false); | ||||
|     flush_tlb(page_laddr); | ||||
|     flush_tlb(page_vaddr); | ||||
| #ifdef MM_DEBUG | ||||
|     dbgprintf("MM: >> unquickmap_page L%x =/> P%x\n", page_laddr, old_physical_address); | ||||
|     dbgprintf("MM: >> unquickmap_page L%x =/> P%x\n", page_vaddr, old_physical_address); | ||||
| #endif | ||||
|     m_quickmap_in_use = false; | ||||
| } | ||||
|  | @ -522,8 +522,8 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg | |||
| { | ||||
|     ASSERT(region.page_directory()); | ||||
|     InterruptDisabler disabler; | ||||
|     auto page_laddr = region.laddr().offset(page_index_in_region * PAGE_SIZE); | ||||
|     auto pte = ensure_pte(*region.page_directory(), page_laddr); | ||||
|     auto page_vaddr = region.vaddr().offset(page_index_in_region * PAGE_SIZE); | ||||
|     auto pte = ensure_pte(*region.page_directory(), page_vaddr); | ||||
|     auto& physical_page = region.vmo().physical_pages()[page_index_in_region]; | ||||
|     ASSERT(physical_page); | ||||
|     pte.set_physical_page_base(physical_page->paddr().get()); | ||||
|  | @ -535,9 +535,9 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg | |||
|     pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching); | ||||
|     pte.set_write_through(!region.vmo().m_allow_cpu_caching); | ||||
|     pte.set_user_allowed(user_allowed); | ||||
|     region.page_directory()->flush(page_laddr); | ||||
|     region.page_directory()->flush(page_vaddr); | ||||
| #ifdef MM_DEBUG | ||||
|     dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.page_directory()->cr3(), pte.ptr(), region.name().characters(), page_laddr.get(), physical_page->paddr().get(), physical_page.ptr()); | ||||
|     dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.page_directory()->cr3(), pte.ptr(), region.name().characters(), page_vaddr.get(), physical_page->paddr().get(), physical_page.ptr()); | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
|  | @ -545,10 +545,10 @@ void MemoryManager::remap_region(PageDirectory& page_directory, Region& region) | |||
| { | ||||
|     InterruptDisabler disabler; | ||||
|     ASSERT(region.page_directory() == &page_directory); | ||||
|     map_region_at_address(page_directory, region, region.laddr(), true); | ||||
|     map_region_at_address(page_directory, region, region.vaddr(), true); | ||||
| } | ||||
| 
 | ||||
| void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, LinearAddress laddr, bool user_allowed) | ||||
| void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, VirtualAddress vaddr, bool user_allowed) | ||||
| { | ||||
|     InterruptDisabler disabler; | ||||
|     region.set_page_directory(page_directory); | ||||
|  | @ -557,8 +557,8 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& | |||
|     dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count()); | ||||
| #endif | ||||
|     for (size_t i = 0; i < region.page_count(); ++i) { | ||||
|         auto page_laddr = laddr.offset(i * PAGE_SIZE); | ||||
|         auto pte = ensure_pte(page_directory, page_laddr); | ||||
|         auto page_vaddr = vaddr.offset(i * PAGE_SIZE); | ||||
|         auto pte = ensure_pte(page_directory, page_vaddr); | ||||
|         auto& physical_page = vmo.physical_pages()[region.first_page_index() + i]; | ||||
|         if (physical_page) { | ||||
|             pte.set_physical_page_base(physical_page->paddr().get()); | ||||
|  | @ -576,9 +576,9 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& | |||
|             pte.set_writable(region.is_writable()); | ||||
|         } | ||||
|         pte.set_user_allowed(user_allowed); | ||||
|         page_directory.flush(page_laddr); | ||||
|         page_directory.flush(page_vaddr); | ||||
| #ifdef MM_DEBUG | ||||
|         dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", &page_directory, region.name().characters(), page_laddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr()); | ||||
|         dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", &page_directory, region.name().characters(), page_vaddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr()); | ||||
| #endif | ||||
|     } | ||||
| } | ||||
|  | @ -588,16 +588,16 @@ bool MemoryManager::unmap_region(Region& region) | |||
|     ASSERT(region.page_directory()); | ||||
|     InterruptDisabler disabler; | ||||
|     for (size_t i = 0; i < region.page_count(); ++i) { | ||||
|         auto laddr = region.laddr().offset(i * PAGE_SIZE); | ||||
|         auto pte = ensure_pte(*region.page_directory(), laddr); | ||||
|         auto vaddr = region.vaddr().offset(i * PAGE_SIZE); | ||||
|         auto pte = ensure_pte(*region.page_directory(), vaddr); | ||||
|         pte.set_physical_page_base(0); | ||||
|         pte.set_present(false); | ||||
|         pte.set_writable(false); | ||||
|         pte.set_user_allowed(false); | ||||
|         region.page_directory()->flush(laddr); | ||||
|         region.page_directory()->flush(vaddr); | ||||
| #ifdef MM_DEBUG | ||||
|         auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i]; | ||||
|         dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, physical_page ? physical_page->paddr().get() : 0); | ||||
|         dbgprintf("MM: >> Unmapped L%x => P%x <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0); | ||||
| #endif | ||||
|     } | ||||
|     region.release_page_directory(); | ||||
|  | @ -606,19 +606,19 @@ bool MemoryManager::unmap_region(Region& region) | |||
| 
 | ||||
| bool MemoryManager::map_region(Process& process, Region& region) | ||||
| { | ||||
|     map_region_at_address(process.page_directory(), region, region.laddr(), true); | ||||
|     map_region_at_address(process.page_directory(), region, region.vaddr(), true); | ||||
|     return true; | ||||
| } | ||||
| 
 | ||||
| bool MemoryManager::validate_user_read(const Process& process, LinearAddress laddr) const | ||||
| bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const | ||||
| { | ||||
|     auto* region = region_from_laddr(process, laddr); | ||||
|     auto* region = region_from_vaddr(process, vaddr); | ||||
|     return region && region->is_readable(); | ||||
| } | ||||
| 
 | ||||
| bool MemoryManager::validate_user_write(const Process& process, LinearAddress laddr) const | ||||
| bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr) const | ||||
| { | ||||
|     auto* region = region_from_laddr(process, laddr); | ||||
|     auto* region = region_from_vaddr(process, vaddr); | ||||
|     return region && region->is_writable(); | ||||
| } | ||||
| 
 | ||||
|  | @ -637,7 +637,7 @@ void MemoryManager::unregister_vmo(VMObject& vmo) | |||
| void MemoryManager::register_region(Region& region) | ||||
| { | ||||
|     InterruptDisabler disabler; | ||||
|     if (region.laddr().get() >= 0xc0000000) | ||||
|     if (region.vaddr().get() >= 0xc0000000) | ||||
|         m_kernel_regions.set(®ion); | ||||
|     else | ||||
|         m_user_regions.set(®ion); | ||||
|  | @ -646,7 +646,7 @@ void MemoryManager::register_region(Region& region) | |||
| void MemoryManager::unregister_region(Region& region) | ||||
| { | ||||
|     InterruptDisabler disabler; | ||||
|     if (region.laddr().get() >= 0xc0000000) | ||||
|     if (region.vaddr().get() >= 0xc0000000) | ||||
|         m_kernel_regions.remove(®ion); | ||||
|     else | ||||
|         m_user_regions.remove(®ion); | ||||
|  |  | |||
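The largest batch of renames is in MemoryManager.cpp above. For orientation, here is a condensed, illustrative restatement of the copy-on-write path those hunks touch; it is a sketch of the flow, not code taken from this commit, and it leans on the private quickmap_page()/unquickmap_page() helpers and remap_region_page() exactly as they appear in the hunks.

    // Condensed sketch of the COW copy step after the rename (illustrative only).
    // Mirrors the quickmap usage visible in the hunks above; not part of the commit.
    bool MemoryManager::copy_on_write_sketch(Region& region, unsigned page_index_in_region)
    {
        auto& vmo = region.vmo();
        // Detach the shared physical page from the VMO. Holding it here keeps the old
        // page alive while we copy through the region's still-present mapping.
        auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
        auto physical_page = allocate_physical_page(ShouldZeroFill::No);

        // Map the fresh page at the quickmap slot so the kernel can write into it,
        // then copy from the page's existing mapping inside the region
        // (region.vaddr(), formerly region.laddr()).
        byte* dest_ptr = quickmap_page(*physical_page);
        const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
        memcpy(dest_ptr, src_ptr, PAGE_SIZE);

        vmo.physical_pages()[page_index_in_region] = move(physical_page);
        unquickmap_page();

        // Re-map the page writable now that it is private to this region.
        remap_region_page(region, page_index_in_region, true);
        return true;
    }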
|  | @ -12,7 +12,7 @@ | |||
| #include <AK/Vector.h> | ||||
| #include <AK/Weakable.h> | ||||
| #include <Kernel/FileSystem/InodeIdentifier.h> | ||||
| #include <Kernel/LinearAddress.h> | ||||
| #include <Kernel/VirtualAddress.h> | ||||
| #include <Kernel/VM/PhysicalPage.h> | ||||
| #include <Kernel/VM/Region.h> | ||||
| #include <Kernel/VM/VMObject.h> | ||||
|  | @ -52,8 +52,8 @@ public: | |||
| 
 | ||||
|     void enter_process_paging_scope(Process&); | ||||
| 
 | ||||
|     bool validate_user_read(const Process&, LinearAddress) const; | ||||
|     bool validate_user_write(const Process&, LinearAddress) const; | ||||
|     bool validate_user_read(const Process&, VirtualAddress) const; | ||||
|     bool validate_user_write(const Process&, VirtualAddress) const; | ||||
| 
 | ||||
|     enum class ShouldZeroFill | ||||
|     { | ||||
|  | @ -71,10 +71,10 @@ public: | |||
|     int user_physical_pages_in_existence() const { return s_user_physical_pages_in_existence; } | ||||
|     int super_physical_pages_in_existence() const { return s_super_physical_pages_in_existence; } | ||||
| 
 | ||||
|     void map_for_kernel(LinearAddress, PhysicalAddress); | ||||
|     void map_for_kernel(VirtualAddress, PhysicalAddress); | ||||
| 
 | ||||
|     RetainPtr<Region> allocate_kernel_region(size_t, String&& name); | ||||
|     void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible); | ||||
|     void map_region_at_address(PageDirectory&, Region&, VirtualAddress, bool user_accessible); | ||||
| 
 | ||||
| private: | ||||
|     MemoryManager(); | ||||
|  | @ -89,17 +89,17 @@ private: | |||
| 
 | ||||
|     void initialize_paging(); | ||||
|     void flush_entire_tlb(); | ||||
|     void flush_tlb(LinearAddress); | ||||
|     void flush_tlb(VirtualAddress); | ||||
| 
 | ||||
|     RetainPtr<PhysicalPage> allocate_page_table(PageDirectory&, unsigned index); | ||||
| 
 | ||||
|     void map_protected(LinearAddress, size_t length); | ||||
|     void map_protected(VirtualAddress, size_t length); | ||||
| 
 | ||||
|     void create_identity_mapping(PageDirectory&, LinearAddress, size_t length); | ||||
|     void remove_identity_mapping(PageDirectory&, LinearAddress, size_t); | ||||
|     void create_identity_mapping(PageDirectory&, VirtualAddress, size_t length); | ||||
|     void remove_identity_mapping(PageDirectory&, VirtualAddress, size_t); | ||||
| 
 | ||||
|     static Region* region_from_laddr(Process&, LinearAddress); | ||||
|     static const Region* region_from_laddr(const Process&, LinearAddress); | ||||
|     static Region* region_from_vaddr(Process&, VirtualAddress); | ||||
|     static const Region* region_from_vaddr(const Process&, VirtualAddress); | ||||
| 
 | ||||
|     bool copy_on_write(Region&, unsigned page_index_in_region); | ||||
|     bool page_in_from_inode(Region&, unsigned page_index_in_region); | ||||
|  | @ -215,12 +215,12 @@ private: | |||
|     static unsigned s_user_physical_pages_in_existence; | ||||
|     static unsigned s_super_physical_pages_in_existence; | ||||
| 
 | ||||
|     PageTableEntry ensure_pte(PageDirectory&, LinearAddress); | ||||
|     PageTableEntry ensure_pte(PageDirectory&, VirtualAddress); | ||||
| 
 | ||||
|     RetainPtr<PageDirectory> m_kernel_page_directory; | ||||
|     dword* m_page_table_zero; | ||||
| 
 | ||||
|     LinearAddress m_quickmap_addr; | ||||
|     VirtualAddress m_quickmap_addr; | ||||
| 
 | ||||
|     Vector<Retained<PhysicalPage>> m_free_physical_pages; | ||||
|     Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages; | ||||
|  |  | |||
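MemoryManager.h above is where the rename is most visible to callers: code that checks a userspace pointer now goes through validate_user_read()/validate_user_write() with a VirtualAddress. A hedged sketch of such a call site follows; the helper name, the cast, and the -EFAULT return are illustrative assumptions, and only MM, VirtualAddress, and the validate_user_read() signature come from the diff.

    // Hypothetical call site (not from this commit) showing the renamed validation API.
    // Assumes -EFAULT is the errno returned for a bad userspace pointer.
    int read_user_byte_sketch(const Process& process, const byte* user_ptr, byte& out)
    {
        // Raw user pointers are wrapped in a VirtualAddress before being validated.
        if (!MM.validate_user_read(process, VirtualAddress(reinterpret_cast<dword>(user_ptr))))
            return -EFAULT;
        out = *user_ptr;
        return 0;
    }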
|  | @ -7,13 +7,13 @@ static const dword userspace_range_base = 0x01000000; | |||
| static const dword kernelspace_range_base = 0xc0000000; | ||||
| 
 | ||||
| PageDirectory::PageDirectory(PhysicalAddress paddr) | ||||
|     : m_range_allocator(LinearAddress(0xc0000000), 0x3f000000) | ||||
|     : m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000) | ||||
| { | ||||
|     m_directory_page = PhysicalPage::create_eternal(paddr, true); | ||||
| } | ||||
| 
 | ||||
| PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator) | ||||
|     : m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(LinearAddress(userspace_range_base), kernelspace_range_base - userspace_range_base)) | ||||
|     : m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(VirtualAddress(userspace_range_base), kernelspace_range_base - userspace_range_base)) | ||||
| { | ||||
|     MM.populate_page_directory(*this); | ||||
| } | ||||
|  | @ -25,13 +25,13 @@ PageDirectory::~PageDirectory() | |||
| #endif | ||||
| } | ||||
| 
 | ||||
| void PageDirectory::flush(LinearAddress laddr) | ||||
| void PageDirectory::flush(VirtualAddress vaddr) | ||||
| { | ||||
| #ifdef MM_DEBUG | ||||
|     dbgprintf("MM: Flush page L%x\n", laddr.get()); | ||||
|     dbgprintf("MM: Flush page L%x\n", vaddr.get()); | ||||
| #endif | ||||
|     if (!current) | ||||
|         return; | ||||
|     if (this == &MM.kernel_page_directory() || ¤t->process().page_directory() == this) | ||||
|         MM.flush_tlb(laddr); | ||||
|         MM.flush_tlb(vaddr); | ||||
| } | ||||
|  |  | |||
|  | @ -17,7 +17,7 @@ public: | |||
|     dword cr3() const { return m_directory_page->paddr().get(); } | ||||
|     dword* entries() { return reinterpret_cast<dword*>(cr3()); } | ||||
| 
 | ||||
|     void flush(LinearAddress); | ||||
|     void flush(VirtualAddress); | ||||
| 
 | ||||
|     RangeAllocator& range_allocator() { return m_range_allocator; } | ||||
| 
 | ||||
|  |  | |||
|  | @ -4,7 +4,7 @@ | |||
| 
 | ||||
| //#define VRA_DEBUG
 | ||||
| 
 | ||||
| RangeAllocator::RangeAllocator(LinearAddress base, size_t size) | ||||
| RangeAllocator::RangeAllocator(VirtualAddress base, size_t size) | ||||
| { | ||||
|     m_available_ranges.append({ base, size }); | ||||
| #ifdef VRA_DEBUG | ||||
|  | @ -82,7 +82,7 @@ Range RangeAllocator::allocate_anywhere(size_t size) | |||
|     return {}; | ||||
| } | ||||
| 
 | ||||
| Range RangeAllocator::allocate_specific(LinearAddress base, size_t size) | ||||
| Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size) | ||||
| { | ||||
|     Range allocated_range(base, size); | ||||
|     for (int i = 0; i < m_available_ranges.size(); ++i) { | ||||
|  |  | |||
|  | @ -1,33 +1,33 @@ | |||
| #pragma once | ||||
| 
 | ||||
| #include <AK/Vector.h> | ||||
| #include <Kernel/LinearAddress.h> | ||||
| #include <Kernel/VirtualAddress.h> | ||||
| 
 | ||||
| class Range { | ||||
|     friend class RangeAllocator; | ||||
| 
 | ||||
| public: | ||||
|     Range() {} | ||||
|     Range(LinearAddress base, size_t size) | ||||
|     Range(VirtualAddress base, size_t size) | ||||
|         : m_base(base) | ||||
|         , m_size(size) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     LinearAddress base() const { return m_base; } | ||||
|     VirtualAddress base() const { return m_base; } | ||||
|     size_t size() const { return m_size; } | ||||
|     bool is_valid() const { return !m_base.is_null(); } | ||||
| 
 | ||||
|     bool contains(LinearAddress laddr) const { return laddr >= base() && laddr < end(); } | ||||
|     bool contains(VirtualAddress vaddr) const { return vaddr >= base() && vaddr < end(); } | ||||
| 
 | ||||
|     LinearAddress end() const { return m_base.offset(m_size); } | ||||
|     VirtualAddress end() const { return m_base.offset(m_size); } | ||||
| 
 | ||||
|     bool operator==(const Range& other) const | ||||
|     { | ||||
|         return m_base == other.m_base && m_size == other.m_size; | ||||
|     } | ||||
| 
 | ||||
|     bool contains(LinearAddress base, size_t size) const | ||||
|     bool contains(VirtualAddress base, size_t size) const | ||||
|     { | ||||
|         return base >= m_base && base.offset(size) <= end(); | ||||
|     } | ||||
|  | @ -40,18 +40,18 @@ public: | |||
|     Vector<Range, 2> carve(const Range&); | ||||
| 
 | ||||
| private: | ||||
|     LinearAddress m_base; | ||||
|     VirtualAddress m_base; | ||||
|     size_t m_size { 0 }; | ||||
| }; | ||||
| 
 | ||||
| class RangeAllocator { | ||||
| public: | ||||
|     RangeAllocator(LinearAddress, size_t); | ||||
|     RangeAllocator(VirtualAddress, size_t); | ||||
|     RangeAllocator(const RangeAllocator&); | ||||
|     ~RangeAllocator(); | ||||
| 
 | ||||
|     Range allocate_anywhere(size_t); | ||||
|     Range allocate_specific(LinearAddress, size_t); | ||||
|     Range allocate_specific(VirtualAddress, size_t); | ||||
|     void deallocate(Range); | ||||
| 
 | ||||
|     void dump() const; | ||||
|  |  | |||
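Range and RangeAllocator above now speak VirtualAddress throughout. A small illustrative exercise of the half-open semantics declared in that header; the base address is made up, 4096 stands in for PAGE_SIZE, and ASSERT is the kernel's usual assertion macro.

    // Illustrative only; uses Range and RangeAllocator exactly as declared above.
    void range_semantics_sketch(PageDirectory& page_directory)
    {
        Range range(VirtualAddress(0x02000000), 4 * 4096); // four pages at a made-up base

        ASSERT(range.contains(VirtualAddress(0x02000000)));           // the base itself is inside
        ASSERT(!range.contains(range.end()));                         // end() is exclusive (base + size)
        ASSERT(range.contains(VirtualAddress(0x02001000), 2 * 4096)); // a sub-range that fits entirely

        // Allocations hand back a Range as well; a null-based Range signals failure.
        auto allocated = page_directory.range_allocator().allocate_anywhere(4096);
        if (allocated.is_valid()) {
            // ... map a Region at allocated.base(), and eventually give the range back:
            page_directory.range_allocator().deallocate(allocated);
        }
    }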
|  | @ -75,7 +75,7 @@ Retained<Region> Region::clone() | |||
|             current->process().name().characters(), | ||||
|             current->pid(), | ||||
|             m_name.characters(), | ||||
|             laddr().get()); | ||||
|             vaddr().get()); | ||||
| #endif | ||||
|         // Create a new region backed by the same VMObject.
 | ||||
|         return adopt(*new Region(m_range, m_vmo.copy_ref(), m_offset_in_vmo, String(m_name), m_access)); | ||||
|  | @ -86,7 +86,7 @@ Retained<Region> Region::clone() | |||
|         current->process().name().characters(), | ||||
|         current->pid(), | ||||
|         m_name.characters(), | ||||
|         laddr().get()); | ||||
|         vaddr().get()); | ||||
| #endif | ||||
|     // Set up a COW region. The parent (this) region becomes COW as well!
 | ||||
|     m_cow_map.fill(true); | ||||
|  | @ -98,7 +98,7 @@ int Region::commit() | |||
| { | ||||
|     InterruptDisabler disabler; | ||||
| #ifdef MM_DEBUG | ||||
|     dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at L%x\n", vmo().page_count(), this, &vmo(), laddr().get()); | ||||
|     dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at L%x\n", vmo().page_count(), this, &vmo(), vaddr().get()); | ||||
| #endif | ||||
|     for (size_t i = first_page_index(); i <= last_page_index(); ++i) { | ||||
|         if (!vmo().physical_pages()[i].is_null()) | ||||
|  |  | |||
|  | @ -24,7 +24,7 @@ public: | |||
|     Region(const Range&, RetainPtr<Inode>&&, String&&, byte access); | ||||
|     ~Region(); | ||||
| 
 | ||||
|     LinearAddress laddr() const { return m_range.base(); } | ||||
|     VirtualAddress vaddr() const { return m_range.base(); } | ||||
|     size_t size() const { return m_range.size(); } | ||||
|     bool is_readable() const { return m_access & Access::Read; } | ||||
|     bool is_writable() const { return m_access & Access::Write; } | ||||
|  | @ -41,14 +41,14 @@ public: | |||
| 
 | ||||
|     Retained<Region> clone(); | ||||
| 
 | ||||
|     bool contains(LinearAddress laddr) const | ||||
|     bool contains(VirtualAddress vaddr) const | ||||
|     { | ||||
|         return m_range.contains(laddr); | ||||
|         return m_range.contains(vaddr); | ||||
|     } | ||||
| 
 | ||||
|     unsigned page_index_from_address(LinearAddress laddr) const | ||||
|     unsigned page_index_from_address(VirtualAddress vaddr) const | ||||
|     { | ||||
|         return (laddr - m_range.base()).get() / PAGE_SIZE; | ||||
|         return (vaddr - m_range.base()).get() / PAGE_SIZE; | ||||
|     } | ||||
| 
 | ||||
|     size_t first_page_index() const | ||||
|  |  | |||
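Region::page_index_from_address() above is the arithmetic the page-fault handler depends on. Spelled out with made-up addresses it looks like this; the example also exercises the VirtualAddress subtraction operator introduced by this commit.

    // Same computation as Region::page_index_from_address(), with invented addresses.
    unsigned page_index_sketch()
    {
        VirtualAddress region_base(0x08000000); // hypothetical region base (region.vaddr())
        VirtualAddress fault_addr(0x08003a10);  // hypothetical faulting address inside the region

        // (0x08003a10 - 0x08000000) / 4096  ==  0x3a10 / 0x1000  ==  3
        return (fault_addr - region_base).get() / 4096;
    }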
Kernel/VirtualAddress.h (new file, 39 lines)
|  | @ -0,0 +1,39 @@ | |||
| #pragma once | ||||
| 
 | ||||
| #include <AK/Types.h> | ||||
| 
 | ||||
| class VirtualAddress { | ||||
| public: | ||||
|     VirtualAddress() {} | ||||
|     explicit VirtualAddress(dword address) | ||||
|         : m_address(address) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     bool is_null() const { return m_address == 0; } | ||||
| 
 | ||||
|     VirtualAddress offset(dword o) const { return VirtualAddress(m_address + o); } | ||||
|     dword get() const { return m_address; } | ||||
|     void set(dword address) { m_address = address; } | ||||
|     void mask(dword m) { m_address &= m; } | ||||
| 
 | ||||
|     bool operator<=(const VirtualAddress& other) const { return m_address <= other.m_address; } | ||||
|     bool operator>=(const VirtualAddress& other) const { return m_address >= other.m_address; } | ||||
|     bool operator>(const VirtualAddress& other) const { return m_address > other.m_address; } | ||||
|     bool operator<(const VirtualAddress& other) const { return m_address < other.m_address; } | ||||
|     bool operator==(const VirtualAddress& other) const { return m_address == other.m_address; } | ||||
|     bool operator!=(const VirtualAddress& other) const { return m_address != other.m_address; } | ||||
| 
 | ||||
|     byte* as_ptr() { return reinterpret_cast<byte*>(m_address); } | ||||
|     const byte* as_ptr() const { return reinterpret_cast<const byte*>(m_address); } | ||||
| 
 | ||||
|     dword page_base() const { return m_address & 0xfffff000; } | ||||
| 
 | ||||
| private: | ||||
|     dword m_address { 0 }; | ||||
| }; | ||||
| 
 | ||||
| inline VirtualAddress operator-(const VirtualAddress& a, const VirtualAddress& b) | ||||
| { | ||||
|     return VirtualAddress(a.get() - b.get()); | ||||
| } | ||||
|  | @ -275,10 +275,10 @@ void exception_14_handler(RegisterDumpWithExceptionCode& regs) | |||
|     dump(regs); | ||||
| #endif | ||||
| 
 | ||||
|     auto response = MM.handle_page_fault(PageFault(regs.exception_code, LinearAddress(faultAddress))); | ||||
|     auto response = MM.handle_page_fault(PageFault(regs.exception_code, VirtualAddress(faultAddress))); | ||||
| 
 | ||||
|     if (response == PageFaultResponse::ShouldCrash) { | ||||
|         kprintf("%s(%u:%u) unrecoverable page fault, %s laddr=%p\n", | ||||
|         kprintf("%s(%u:%u) unrecoverable page fault, %s vaddr=%p\n", | ||||
|             current->process().name().characters(), | ||||
|             current->pid(), | ||||
|             current->tid(), | ||||
|  |  | |||
|  | @ -1,6 +1,6 @@ | |||
| #pragma once | ||||
| 
 | ||||
| #include <Kernel/LinearAddress.h> | ||||
| #include <Kernel/VirtualAddress.h> | ||||
| #include <Kernel/kstdio.h> | ||||
| 
 | ||||
| #define PAGE_SIZE 4096 | ||||
|  | @ -194,13 +194,13 @@ struct PageFaultFlags { | |||
| 
 | ||||
| class PageFault { | ||||
| public: | ||||
|     PageFault(word code, LinearAddress laddr) | ||||
|     PageFault(word code, VirtualAddress vaddr) | ||||
|         : m_code(code) | ||||
|         , m_laddr(laddr) | ||||
|         , m_vaddr(vaddr) | ||||
|     { | ||||
|     } | ||||
| 
 | ||||
|     LinearAddress laddr() const { return m_laddr; } | ||||
|     VirtualAddress vaddr() const { return m_vaddr; } | ||||
|     word code() const { return m_code; } | ||||
| 
 | ||||
|     bool is_not_present() const { return (m_code & 1) == PageFaultFlags::NotPresent; } | ||||
|  | @ -213,7 +213,7 @@ public: | |||
| 
 | ||||
| private: | ||||
|     word m_code; | ||||
|     LinearAddress m_laddr; | ||||
|     VirtualAddress m_vaddr; | ||||
| }; | ||||
| 
 | ||||
| struct [[gnu::packed]] RegisterDump | ||||
|  |  | |||
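The PageFault class in i386.h above is what exception_14_handler constructs and hands to MM.handle_page_fault(). A small construction sketch follows; the exception code is invented, and the expected is_not_present() result assumes the x86 convention that a clear bit 0 means the page was not present, which is what PageFaultFlags::NotPresent encodes.

    // Illustrative only; mirrors how exception_14_handler builds a PageFault above.
    void page_fault_sketch()
    {
        PageFault fault(0x06 /* made-up exception code: bit 0 clear */, VirtualAddress(0xdeadb000));

        fault.vaddr().get();    // 0xdeadb000 -- renamed accessor (was laddr())
        fault.code();           // 0x06
        fault.is_not_present(); // true under the x86 convention noted above
    }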