	Kernel: Harden sys$munmap Vector usage against OOM.
Theoretically the append should never fail, as we have inline storage of 2, which should be enough. However, I'm planning on removing the non-try variants of AK::Vector when compiling in kernel mode in the future, so this will need to go eventually. I suppose it also protects against some unforeseen bug where we can append more than 2 items.
Author: Brian Gianforcaro
parent 569c5a8922
commit 0ca668f59c

1 changed file with 4 additions and 3 deletions
@@ -505,7 +505,7 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
     // slow: without caching
     const auto& regions = space().find_regions_intersecting(range_to_unmap);
 
-    // check if any of the regions is not mmapped, to not accientally
+    // Check if any of the regions is not mmapped, to not accidentally
     // error-out with just half a region map left
     for (auto* region : regions) {
         if (!region->is_mmap())
@@ -530,8 +530,9 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
         region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
 
-        // otherwise just split the regions and collect them for future mapping
-        new_regions.append(space().split_region_around_range(*region, range_to_unmap));
+        // Otherwise just split the regions and collect them for future mapping
+        if (new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
+            return ENOMEM;
     }
     // Instead we give back the unwanted VM manually at the end.
     space().page_directory().range_allocator().deallocate(range_to_unmap);
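For context, below is a minimal sketch (not part of the commit) of the hardening pattern this change applies: collect the split-off pieces in an AK::Vector with inline capacity for two entries, and surface allocation failure as ENOMEM via try_append() rather than calling the infallible append(). It assumes the AK::Vector API of this era, where try_append() returns a bool that is false on allocation failure, and it uses placeholder element types and a hypothetical helper instead of the kernel's Region/Space machinery.

#include <AK/Vector.h>
#include <cerrno>

// Illustrative stand-in for the syscall's collection step; returns 0 on success or an
// errno-style code, loosely mirroring how sys$munmap propagates ENOMEM.
static int collect_split_parts_example(int left_part, int right_part)
{
    // Inline capacity of 2: splitting a region around an unmapped range yields at most
    // two leftover pieces, so this Vector should never need to heap-allocate.
    Vector<int, 2> new_regions;

    // try_append() (assumed here to return false on allocation failure) turns an
    // unforeseen OOM into a clean ENOMEM for the caller instead of a panic in append().
    if (!new_regions.try_append(left_part))
        return ENOMEM;
    if (!new_regions.try_append(right_part))
        return ENOMEM;

    return 0;
}

Once the non-try Vector variants are removed from kernel builds, as the commit message anticipates, call sites like this one have to use the try_ form and handle the error path regardless of whether allocation can realistically fail.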