Kernel: Add locks around RangeAllocator

We need to keep multiple processors from changing it at the same time.
author  Tom
parent  66f46d03e4
commit  2b25a89ab5

2 changed files with 10 additions and 0 deletions
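The hunks below all use the same idiom: a ScopedSpinLock guard that acquires the allocator's spinlock in its constructor and releases it in its destructor, so every return path out of allocate_anywhere() and allocate_specific() unlocks automatically. Here is a minimal self-contained sketch of that idiom, written against the standard library; it illustrates the pattern only and is not SerenityOS's actual SpinLock/ScopedSpinLock implementation:

// Minimal sketch of the RAII spinlock idiom, using std::atomic_flag.
// Illustrative only; not the kernel's real SpinLock/ScopedSpinLock.
#include <atomic>

class SpinLock {
public:
    void lock()
    {
        // Spin until test_and_set() observes the flag clear; acquire
        // ordering makes the protected data visible to this processor.
        while (m_flag.test_and_set(std::memory_order_acquire)) { }
        m_locked = true;
    }

    void unlock()
    {
        m_locked = false;
        m_flag.clear(std::memory_order_release);
    }

    // Only meaningful for debug assertions by the lock holder, as in
    // ASSERT(m_lock.is_locked()); racy if read without holding the lock.
    bool is_locked() const { return m_locked; }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    bool m_locked { false };
};

class ScopedSpinLock {
public:
    explicit ScopedSpinLock(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquire on construction
    }
    ~ScopedSpinLock() { m_lock.unlock(); } // release on scope exit

private:
    SpinLock& m_lock;
};

Usage mirrors the diff: taking the lock is a single declaration, e.g. ScopedSpinLock lock(m_lock);, and the critical section is simply the rest of the enclosing scope.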
				
			
@@ -44,12 +44,14 @@ void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
     m_total_range = { base, size };
     m_available_ranges.append({ base, size });
 #ifdef VRA_DEBUG
+    ScopedSpinLock lock(m_lock);
     dump();
 #endif
 }
 
 void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
 {
+    ScopedSpinLock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
     m_available_ranges = parent_allocator.m_available_ranges;
 }
@@ -60,6 +62,7 @@ RangeAllocator::~RangeAllocator()
 
 void RangeAllocator::dump() const
 {
+    ASSERT(m_lock.is_locked());
     dbg() << "RangeAllocator{" << this << "}";
     for (auto& range : m_available_ranges) {
         dbg() << "    " << String::format("%x", range.base().get()) << " -> " << String::format("%x", range.end().get() - 1);
@@ -85,6 +88,7 @@ Vector<Range, 2> Range::carve(const Range& taken)
 
 void RangeAllocator::carve_at_index(int index, const Range& range)
 {
+    ASSERT(m_lock.is_locked());
     auto remaining_parts = m_available_ranges[index].carve(range);
     ASSERT(remaining_parts.size() >= 1);
     m_available_ranges[index] = remaining_parts[0];
@@ -106,6 +110,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     size_t offset_from_effective_base = 0;
 #endif
 
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         // FIXME: This check is probably excluding some valid candidates when using a large alignment.
@@ -140,6 +145,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
         return {};
 
     Range allocated_range(base, size);
+    ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
         if (!available_range.contains(base, size))
@@ -161,6 +167,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 
 void RangeAllocator::deallocate(Range range)
 {
+    ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));
     ASSERT(range.size());
     ASSERT(range.base() < range.end());
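Only one of the two changed files appears above; the other is presumably the allocator's header gaining the lock member itself (an assumption; the diff does not show it). One detail the hunks do pin down: the member must be declared mutable, because initialize_from_parent() locks parent_allocator.m_lock through a const RangeAllocator&, and dump() const asserts on it. A hypothetical sketch under those assumptions, reusing the SpinLock/ScopedSpinLock sketch above:

#include <cassert>

// Hypothetical sketch of the header-side change (not shown in the diff).
// The exact kernel types and member declaration are assumptions.
class RangeAllocator {
public:
    void initialize_from_parent(const RangeAllocator& parent_allocator)
    {
        // Compiles only because m_lock is mutable: we lock through a
        // const reference to the parent allocator.
        ScopedSpinLock lock(parent_allocator.m_lock);
        // ... copy m_total_range and m_available_ranges ...
    }

    void dump() const
    {
        // Mirrors ASSERT(m_lock.is_locked()) in the diff: dump() expects
        // its caller to already hold the lock.
        assert(m_lock.is_locked());
        // ... debug output ...
    }

private:
    mutable SpinLock m_lock; // mutable so const paths can take the lock
};

Note that dump() and carve_at_index() only assert that the lock is held rather than taking it themselves: both are reached from paths that already hold it (initialize_with_range() and the two allocate functions), so locking again would deadlock on a non-recursive spinlock.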