Mirror of https://github.com/RGBCube/serenity, synced 2025-07-24 21:57:35 +00:00
Kernel/MM: Don't allocate a temporary Vector when parsing the memory map
Instead we can achieve the same by just using an optional.
commit 23d6c88027
parent adac43ab1c

1 changed file with 16 additions and 9 deletions
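The idea behind the change, in isolation: instead of accumulating every contiguous run of usable pages in a Vector and creating the physical regions in a second pass, the parser keeps only the run that is currently open in an Optional and hands each run off as soon as a gap closes it. Below is a minimal, self-contained sketch of that pattern in standard C++ — std::optional and std::vector stand in for AK's containers, and Range, page_size, collect_all, stream_runs and make_region are illustrative names, not the kernel's API:

// Illustrative sketch only, not the SerenityOS code: it mirrors the shape of
// the change, with plain integers in place of PhysicalAddress.
#include <cstdint>
#include <optional>
#include <vector>

struct Range {
    uint64_t lower { 0 };
    uint64_t upper { 0 };
};

static constexpr uint64_t page_size = 4096;

// Old shape: collect every contiguous run first, create regions afterwards.
// The vector of runs is a temporary heap allocation that is thrown away.
std::vector<Range> collect_all(std::vector<uint64_t> const& pages)
{
    std::vector<Range> runs;
    for (auto addr : pages) {
        if (runs.empty() || runs.back().upper + page_size != addr)
            runs.push_back({ addr, addr });
        else
            runs.back().upper = addr;
    }
    return runs; // caller loops over this to create the regions
}

// New shape: remember only the run that is still open. When a gap ends it,
// hand it to the consumer immediately; flush the last open run at the end.
template<typename MakeRegion>
void stream_runs(std::vector<uint64_t> const& pages, MakeRegion make_region)
{
    std::optional<Range> last;
    for (auto addr : pages) {
        if (!last.has_value() || last->upper + page_size != addr) {
            if (last.has_value())
                make_region(*last); // a gap closed the previous run
            last = Range { addr, addr };
        } else {
            last->upper = addr; // still contiguous, extend the open run
        }
    }
    if (last.has_value())
        make_region(*last); // don't lose the run that was still open
}

Both shapes produce the same regions; the second simply never materialises the whole list, which is what the commit is after.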
|
@@ -295,8 +295,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         PhysicalAddress upper;
     };
 
-    Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
-
+    Optional<ContiguousPhysicalVirtualRange> last_contiguous_physical_range;
     for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
         // We have to copy these onto the stack, because we take a reference to these when printing them out,
         // and doing so on a packed struct field is UB.
@@ -364,6 +363,8 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             continue;
         }
 
+        // FIXME: This might have a nicer solution than slicing the ranges apart,
+        //        to just put them back together when we dont find a used range in them
         for (PhysicalSize page_base = address; page_base <= (address + length); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
 
@@ -372,24 +373,30 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             for (auto& used_range : global_data.used_memory_ranges) {
                 if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
                     should_skip = true;
+                    page_base = used_range.end.get();
                     break;
                 }
             }
             if (should_skip)
                 continue;
 
-            if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
-                contiguous_physical_ranges.append(ContiguousPhysicalVirtualRange {
-                    .lower = addr,
-                    .upper = addr,
-                });
+            if (!last_contiguous_physical_range.has_value() || last_contiguous_physical_range->upper.offset(PAGE_SIZE) != addr) {
+                if (last_contiguous_physical_range.has_value()) {
+                    auto range = last_contiguous_physical_range.release_value();
+                    // FIXME: OOM?
+                    global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
+                }
+                last_contiguous_physical_range = ContiguousPhysicalVirtualRange { .lower = addr, .upper = addr };
             } else {
-                contiguous_physical_ranges.last().upper = addr;
+                last_contiguous_physical_range->upper = addr;
             }
         }
     }
 
-    for (auto& range : contiguous_physical_ranges) {
+    // FIXME: If this is ever false, theres a good chance that all physical memory is already spent
+    if (last_contiguous_physical_range.has_value()) {
+        auto range = last_contiguous_physical_range.release_value();
         // FIXME: OOM?
         global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
     }
 
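One consequence of streaming the ranges, visible in the last hunk: the run that is still open when the loops finish has not been turned into a region yet, so the new code repeats the flush once more after the loop (the second last_contiguous_physical_range.has_value() block). A tiny check of the same property against the sketch above, reusing its Range, collect_all and stream_runs definitions (illustrative only):

#include <cassert>

int main()
{
    // Three pages with a gap after the first one: two contiguous runs,
    // and the second run is still "open" when the loop ends.
    std::vector<uint64_t> pages { 0x1000, 0x3000, 0x4000 };

    std::vector<Range> streamed;
    stream_runs(pages, [&](Range r) { streamed.push_back(r); });

    auto collected = collect_all(pages);
    assert(streamed.size() == 2 && collected.size() == 2);
    for (std::size_t i = 0; i < streamed.size(); ++i) {
        assert(streamed[i].lower == collected[i].lower);
        assert(streamed[i].upper == collected[i].upper);
    }
    // Dropping the final flush in stream_runs would lose { 0x3000, 0x4000 }.
}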