Kernel: Remove MAP_PURGEABLE from mmap
This brings mmap more in line with other operating systems. Prior to this, it was impossible to request memory that was definitely committed: MAP_PURGEABLE returned a region that was not actually purgeable, but also not fully committed, so using such memory could still crash once the underlying pages could no longer be allocated. This fixes some random crashes in low-memory situations where non-volatile memory is mapped (e.g. malloc, TLS, Gfx::Bitmap) but there is insufficient physical memory to commit a new page when a page in one of these regions is first accessed.
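For userspace, the change boils down to the sketch below (the helper name and its boolean parameter are hypothetical; the flags and the committed-by-default behaviour are the ones introduced by this diff). Callers that previously passed MAP_PURGEABLE to get cheap, not-fully-committed memory now ask for MAP_NORESERVE; everyone else gets fully committed anonymous memory by default.

#include <stddef.h>
#include <sys/mman.h>

// Hypothetical helper: allocate an anonymous, private buffer.
// With this change, a plain MAP_ANONYMOUS | MAP_PRIVATE mapping is backed by
// committed memory, so first-touch page faults cannot fail under memory
// pressure. MAP_NORESERVE (which takes over the old MAP_PURGEABLE bit) opts
// back into commit-on-access for callers that can tolerate it.
static void* alloc_buffer(size_t size, bool allow_uncommitted)
{
    int flags = MAP_ANONYMOUS | MAP_PRIVATE;
    if (allow_uncommitted)
        flags |= MAP_NORESERVE; // before this commit: MAP_PURGEABLE
    void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, flags, 0, 0);
    return ptr == MAP_FAILED ? nullptr : ptr;
}

This mirrors what Gfx::Bitmap::allocate_backing_store does in the last hunk below.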
parent c3451899bc
commit e21cc4cff6

6 changed files with 12 additions and 20 deletions
@@ -144,7 +144,7 @@ Region& Process::allocate_split_region(const Region& source_region, const Range&
 Region* Process::allocate_region(const Range& range, const String& name, int prot, bool should_commit)
 {
     ASSERT(range.is_valid());
-    auto vmobject = AnonymousVMObject::create_with_size(range.size());
+    auto vmobject = PurgeableVMObject::create_with_size(range.size());
     auto region = Region::create_user_accessible(this, range, vmobject, 0, name, prot_to_region_access_flags(prot));
     if (!region->map(page_directory()))
         return nullptr;
@@ -116,7 +116,6 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
 
     bool map_shared = flags & MAP_SHARED;
     bool map_anonymous = flags & MAP_ANONYMOUS;
-    bool map_purgeable = flags & MAP_PURGEABLE;
     bool map_private = flags & MAP_PRIVATE;
     bool map_stack = flags & MAP_STACK;
     bool map_fixed = flags & MAP_FIXED;
@@ -136,19 +135,13 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
 
     Region* region = nullptr;
     Optional<Range> range;
-    if (map_purgeable || map_anonymous) {
+    if (map_noreserve || map_anonymous) {
         range = allocate_range(VirtualAddress(addr), size, alignment);
         if (!range.value().is_valid())
             return (void*)-ENOMEM;
     }
 
-    if (map_purgeable) {
-
-        auto vmobject = PurgeableVMObject::create_with_size(size);
-        region = allocate_region_with_vmobject(range.value(), vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
-        if (!region && (!map_fixed && addr != 0))
-            region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
-    } else if (map_anonymous) {
+    if (map_anonymous) {
         region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, !map_noreserve);
         if (!region && (!map_fixed && addr != 0))
             region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, !map_noreserve);
@@ -437,7 +430,7 @@ void* Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_params)
     if (!old_region->is_mmap())
         return (void*)-EPERM;
 
-    if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & MAP_ANONYMOUS) && !(params.flags & MAP_PURGEABLE)) {
+    if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
         auto range = old_region->range();
         auto old_name = old_region->name();
         auto old_prot = region_access_flags_to_prot(old_region->access());
@@ -92,8 +92,7 @@ enum {
 #define MAP_ANONYMOUS 0x20
 #define MAP_ANON MAP_ANONYMOUS
 #define MAP_STACK 0x40
-#define MAP_PURGEABLE 0x80
-#define MAP_NORESERVE 0x100
+#define MAP_NORESERVE 0x80
 
 #define PROT_READ 0x1
 #define PROT_WRITE 0x2
@@ -154,7 +154,7 @@ extern "C" {
 
 static void* os_alloc(size_t size, const char* name)
 {
-    auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE, 0, 0, ChunkedBlock::block_size, name);
+    auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, ChunkedBlock::block_size, name);
     ASSERT(ptr != MAP_FAILED);
     return ptr;
 }
@@ -36,7 +36,7 @@
 #define MAP_ANONYMOUS 0x20
 #define MAP_ANON MAP_ANONYMOUS
 #define MAP_STACK 0x40
-#define MAP_PURGEABLE 0x80
+#define MAP_NORESERVE 0x80
 
 #define PROT_READ 0x1
 #define PROT_WRITE 0x2
@@ -437,13 +437,13 @@ Optional<BackingStore> Bitmap::allocate_backing_store(BitmapFormat format, const
     const auto pitch = minimum_pitch(size.width(), format);
     const auto data_size_in_bytes = size_in_bytes(pitch, size.height());
 
-    void* data = nullptr;
+    int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+    if (purgeable == Purgeable::Yes)
+        map_flags |= MAP_NORESERVE;
 #ifdef __serenity__
-    int map_flags = purgeable == Purgeable::Yes ? (MAP_PURGEABLE | MAP_PRIVATE) : (MAP_ANONYMOUS | MAP_PRIVATE);
-    data = mmap_with_name(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0, String::format("GraphicsBitmap [%dx%d]", size.width(), size.height()).characters());
+    void* data = mmap_with_name(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0, String::format("GraphicsBitmap [%dx%d]", size.width(), size.height()).characters());
 #else
-    int map_flags = (MAP_ANONYMOUS | MAP_PRIVATE);
-    data = mmap(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0);
+    void* data = mmap(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0);
 #endif
     if (data == MAP_FAILED) {
         perror("mmap");