Kernel+LibC: Allow sys$mmap() callers to specify address alignment
This is exposed via the non-standard serenity_mmap() call in userspace.
parent 02e199a9cb
commit 31e1af732f
7 changed files with 66 additions and 30 deletions
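
To illustrate the new call described in the commit message: a userspace program can now ask for memory whose base address satisfies a page-multiple alignment. The following sketch is not part of the commit; the 1 MiB size, 64 KiB alignment, region name, and the include path for LibC's mman header are all illustrative assumptions.

// Editorial sketch (not from this commit): request 1 MiB of anonymous memory
// whose base address is aligned to 64 KiB. The header name below assumes
// Serenity's LibC mman header; adjust the include to match the tree.
#include <mman.h>
#include <stdio.h>

int main()
{
    void* ptr = serenity_mmap(nullptr, 1024 * 1024, PROT_READ | PROT_WRITE,
                              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0,
                              64 * 1024, "aligned demo buffer");
    if (ptr == MAP_FAILED) {
        perror("serenity_mmap");
        return 1;
    }
    printf("base address: %p\n", ptr); // the low 16 bits should be zero
    return 0;
}

The kernel side of this commit rejects alignments that are not multiples of the page size, so the 64 KiB request here is valid.
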
@@ -151,12 +151,12 @@ bool Process::in_group(gid_t gid) const
     return m_gid == gid || m_extra_gids.contains(gid);
 }
 
-Range Process::allocate_range(VirtualAddress vaddr, size_t size)
+Range Process::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
     if (vaddr.is_null())
-        return page_directory().range_allocator().allocate_anywhere(size);
+        return page_directory().range_allocator().allocate_anywhere(size, alignment);
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }
 
@@ -185,11 +185,9 @@ Region& Process::allocate_split_region(const Region& source_region, const Range&
     return region;
 }
 
-Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, bool commit)
+Region* Process::allocate_region(const Range& range, const String& name, int prot, bool commit)
 {
-    auto range = allocate_range(vaddr, size);
-    if (!range.is_valid())
-        return nullptr;
+    ASSERT(range.is_valid());
     auto& region = add_region(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
     region.map(page_directory());
     if (commit)
@@ -197,6 +195,14 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
     return &region;
 }
 
+Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, bool commit)
+{
+    auto range = allocate_range(vaddr, size);
+    if (!range.is_valid())
+        return nullptr;
+    return allocate_region(range, name, prot, commit);
+}
+
 Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size, NonnullRefPtr<Inode> inode, const String& name, int prot)
 {
     auto range = allocate_range(vaddr, size);
@@ -207,9 +213,10 @@ Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size,
     return &region;
 }
 
-Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
+Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
 {
-    size_t end_in_vmobject = offset_in_vmobject + size;
+    ASSERT(range.is_valid());
+    size_t end_in_vmobject = offset_in_vmobject + range.size();
     if (end_in_vmobject < offset_in_vmobject) {
         dbgprintf("allocate_region_with_vmobject: Overflow (offset + size)\n");
         return nullptr;
@@ -222,9 +229,6 @@ Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size
         dbgprintf("allocate_region_with_vmobject: Attempt to allocate a region with an end past the end of its VMObject.\n");
         return nullptr;
     }
-    auto range = allocate_range(vaddr, size);
-    if (!range.is_valid())
-        return nullptr;
     offset_in_vmobject &= PAGE_MASK;
     Region* region;
     if (user_accessible)
@@ -235,6 +239,15 @@ Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size
     return region;
 }
 
+
+Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible)
+{
+    auto range = allocate_range(vaddr, size);
+    if (!range.is_valid())
+        return nullptr;
+    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot, user_accessible);
+}
+
 bool Process::deallocate_region(Region& region)
 {
     InterruptDisabler disabler;
@@ -364,11 +377,15 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params)
 
     void* addr = (void*)params.addr;
     size_t size = params.size;
+    size_t alignment = params.alignment;
     int prot = params.prot;
     int flags = params.flags;
     int fd = params.fd;
     int offset = params.offset;
 
+    if (alignment & ~PAGE_MASK)
+        return (void*)-EINVAL;
+
     if (!is_user_range(VirtualAddress(addr), size))
         return (void*)-EFAULT;
 
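
The `alignment & ~PAGE_MASK` check added above keeps only the sub-page bits of the requested alignment, so any value that is not a multiple of the page size is rejected with -EINVAL before any allocation work happens. A small sketch of the arithmetic, assuming the usual 4 KiB pages with PAGE_MASK == 0xfffff000 (that constant is not shown in this diff):

// Assumed for illustration: PAGE_SIZE == 4096, PAGE_MASK == 0xfffff000.
static_assert((0x10000u & ~0xfffff000u) == 0, "64 KiB: multiple of the page size, accepted");
static_assert((0x00800u & ~0xfffff000u) != 0, "2 KiB: finer than a page, rejected with -EINVAL");
static_assert((0x01234u & ~0xfffff000u) != 0, "arbitrary non-multiple, rejected with -EINVAL");
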
@@ -407,15 +424,19 @@ void* Process::sys$mmap(const Syscall::SC_mmap_params* user_params)
 
     Region* region = nullptr;
 
+    auto range = allocate_range(VirtualAddress(addr), size, alignment);
+    if (!range.is_valid())
+        return (void*)-ENOMEM;
+
     if (map_purgeable) {
         auto vmobject = PurgeableVMObject::create_with_size(size);
-        region = allocate_region_with_vmobject(VirtualAddress(addr), size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
+        region = allocate_region_with_vmobject(range, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
         if (!region && (!map_fixed && addr != 0))
             region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
     } else if (map_anonymous) {
-        region = allocate_region(VirtualAddress(addr), size, !name.is_null() ? name : "mmap", prot, false);
+        region = allocate_region(range, !name.is_null() ? name : "mmap", prot, false);
         if (!region && (!map_fixed && addr != 0))
-            region = allocate_region({}, size, !name.is_null() ? name : "mmap", prot, false);
+            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, false);
     } else {
         if (offset < 0)
             return (void*)-EINVAL;
@@ -364,6 +364,8 @@ public:
     Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible = true);
     Region* allocate_file_backed_region(VirtualAddress, size_t, NonnullRefPtr<Inode>, const String& name, int prot);
     Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
+    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool user_accessible = true);
+    Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
     bool deallocate_region(Region& region);
 
     Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmobject);
@@ -405,7 +407,7 @@ private:
     Process(Thread*& first_thread, const String& name, uid_t, gid_t, pid_t ppid, RingLevel, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
     static pid_t allocate_pid();
 
-    Range allocate_range(VirtualAddress, size_t);
+    Range allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
     Region& add_region(NonnullOwnPtr<Region>);
 
@@ -240,6 +240,7 @@ struct StringListArgument {
 struct SC_mmap_params {
     uint32_t addr;
     uint32_t size;
+    uint32_t alignment;
     int32_t prot;
     int32_t flags;
     int32_t fd;
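
SC_mmap_params is the argument block shared between LibC and the kernel for sys$mmap, so inserting `alignment` between `size` and `prot` shifts every later field by 4 bytes; that is why the LibC side of this commit fills in the struct with the new field order in the same change. A hypothetical mirror of the leading fields, only to make the offset shift concrete (trailing fields omitted):

#include <cstddef>
#include <cstdint>

// Hypothetical mirror of SC_mmap_params' leading fields after this commit,
// used only to illustrate the layout; it is not the kernel's definition.
struct SC_mmap_params_after {
    uint32_t addr;
    uint32_t size;
    uint32_t alignment; // new field
    int32_t prot;
    int32_t flags;
    int32_t fd;
};

static_assert(offsetof(SC_mmap_params_after, alignment) == 8, "new field lands where prot used to sit");
static_assert(offsetof(SC_mmap_params_after, prot) == 12, "every later field moves down by 4 bytes");
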
@@ -94,7 +94,7 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
     m_available_ranges.insert(index + 1, move(remaining_parts[1]));
 }
 
-Range RangeAllocator::allocate_anywhere(size_t size)
+Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
 #ifdef VM_GUARD_PAGES
     // NOTE: We pad VM allocations with a guard page on each side.
@@ -104,26 +104,32 @@ Range RangeAllocator::allocate_anywhere(size_t size)
     size_t effective_size = size;
     size_t offset_from_effective_base = 0;
 #endif
 
     for (int i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
-        if (available_range.size() < effective_size)
+        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
+        if (available_range.size() < (effective_size + alignment))
             continue;
-        Range allocated_range(available_range.base().offset(offset_from_effective_base), size);
-        if (available_range.size() == effective_size) {
+
+        uintptr_t initial_base = available_range.base().offset(offset_from_effective_base).get();
+        uintptr_t aligned_base = round_up_to_power_of_two(initial_base, alignment);
+
+        Range allocated_range(VirtualAddress(aligned_base), size);
+        if (available_range == allocated_range) {
 #ifdef VRA_DEBUG
-            dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
+            dbgprintf("VRA: Allocated perfect-fit anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
 #endif
             m_available_ranges.remove(i);
             return allocated_range;
         }
         carve_at_index(i, allocated_range);
 #ifdef VRA_DEBUG
-        dbgprintf("VRA: Allocated anywhere(%u): %x\n", size, allocated_range.base().get());
+        dbgprintf("VRA: Allocated anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
         dump();
 #endif
         return allocated_range;
     }
-    kprintf("VRA: Failed to allocate anywhere: %u\n", size);
+    kprintf("VRA: Failed to allocate anywhere: %zu, %zu\n", size, alignment);
     return {};
 }
 
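
Rather than taking a candidate range's base directly, allocate_anywhere() now rounds it up to the requested alignment with round_up_to_power_of_two(), a helper defined elsewhere in the tree and not shown in this diff. A minimal sketch of that rounding, using a stand-in helper that assumes the conventional mask-based implementation:

#include <cassert>
#include <cstdint>

// Stand-in for round_up_to_power_of_two(); the real helper is not part of this
// diff. `power_of_two` must be a power of two (here, the requested alignment).
static uintptr_t round_up_to_power_of_two(uintptr_t value, uintptr_t power_of_two)
{
    return (value + power_of_two - 1) & ~(power_of_two - 1);
}

int main()
{
    // A free range whose usable base is 0x00be1720, with a 64 KiB alignment request.
    uintptr_t initial_base = 0x00be1720;
    uintptr_t aligned_base = round_up_to_power_of_two(initial_base, 0x10000);
    assert(aligned_base == 0x00bf0000); // bumped up to the next 64 KiB boundary
    assert(round_up_to_power_of_two(0x00c00000, 0x10000) == 0x00c00000); // already aligned: unchanged
    return 0;
}

The FIXME kept in the hunk above notes the cost of the simple `size + alignment` fitness check: a range can be skipped even though its aligned base plus `size` would still have fit.
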
@@ -84,7 +84,7 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(const RangeAllocator&);
 
-    Range allocate_anywhere(size_t);
+    Range allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
     Range allocate_specific(VirtualAddress, size_t);
     void deallocate(Range);
 
@@ -32,14 +32,9 @@
 
 extern "C" {
 
-void* mmap(void* addr, size_t size, int prot, int flags, int fd, off_t offset)
+void* serenity_mmap(void* addr, size_t size, int prot, int flags, int fd, off_t offset, size_t alignment, const char* name)
 {
-    return mmap_with_name(addr, size, prot, flags, fd, offset, nullptr);
-}
-
-void* mmap_with_name(void* addr, size_t size, int prot, int flags, int fd, off_t offset, const char* name)
-{
-    Syscall::SC_mmap_params params { (u32)addr, size, prot, flags, fd, offset, { name, name ? strlen(name) : 0 } };
+    Syscall::SC_mmap_params params { (u32)addr, size, alignment, prot, flags, fd, offset, { name, name ? strlen(name) : 0 } };
     int rc = syscall(SC_mmap, &params);
     if (rc < 0 && -rc < EMAXERRNO) {
         errno = -rc;
@@ -48,6 +43,16 @@ void* mmap_with_name(void* addr, size_t size, int prot, int flags, int fd, off_t
     return (void*)rc;
 }
 
+void* mmap(void* addr, size_t size, int prot, int flags, int fd, off_t offset)
+{
+    return serenity_mmap(addr, size, prot, flags, fd, offset, PAGE_SIZE, nullptr);
+}
+
+void* mmap_with_name(void* addr, size_t size, int prot, int flags, int fd, off_t offset, const char* name)
+{
+    return serenity_mmap(addr, size, prot, flags, fd, offset, PAGE_SIZE, name);
+}
+
 int munmap(void* addr, size_t size)
 {
     int rc = syscall(SC_munmap, addr, size);
@@ -53,6 +53,7 @@ __BEGIN_DECLS
 
 void* mmap(void* addr, size_t, int prot, int flags, int fd, off_t);
 void* mmap_with_name(void* addr, size_t, int prot, int flags, int fd, off_t, const char* name);
+void* serenity_mmap(void* addr, size_t, int prot, int flags, int fd, off_t, size_t alignment, const char* name);
 int munmap(void*, size_t);
 int mprotect(void*, size_t, int prot);
 int set_mmap_name(void*, size_t, const char*);