Mirror of https://github.com/RGBCube/serenity (synced 2025-07-25 19:17:44 +00:00)
Kernel: Remove krealloc()
This was only used by a single class (AK::ByteBuffer) in the kernel and not in an OOM-safe way. Now that ByteBuffer no longer uses it, there's no need for the kernel heap to burden itself with supporting this.
parent 966880eb45
commit 25e850ebb1
4 changed files with 0 additions and 48 deletions
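
For context, the pattern that replaces krealloc() at call sites is a copy-based resize that the caller performs (and error-checks) itself. Below is a minimal user-space sketch of that pattern; grow_buffer is a hypothetical helper, std::malloc/std::free stand in for kmalloc/kfree_sized, and none of this is code from the commit:

    #include <algorithm>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical helper: resize a live allocation without realloc().
    // On OOM it returns nullptr and leaves the original buffer intact,
    // so the caller still owns valid memory and can recover.
    void* grow_buffer(void* ptr, std::size_t old_size, std::size_t new_size)
    {
        void* new_ptr = std::malloc(new_size); // kmalloc() in the kernel
        if (!new_ptr)
            return nullptr; // original allocation untouched
        std::memcpy(new_ptr, ptr, std::min(old_size, new_size));
        std::free(ptr); // kfree_sized(ptr, old_size) in the kernel
        return new_ptr;
    }

This is essentially what the removed Heap::reallocate() below did internally; the difference is that callers such as AK::ByteBuffer now own the allocation and its failure handling instead of going through a kernel-wide krealloc().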
@@ -17,7 +17,6 @@
 # define kmalloc malloc
 # define kmalloc_good_size malloc_good_size
 # define kfree free
-# define krealloc realloc

 inline void kfree_sized(void* ptr, size_t)
 {
@@ -118,34 +118,6 @@ public:
         }
     }

-    template<typename MainHeap>
-    void* reallocate(void* ptr, size_t new_size, MainHeap& h)
-    {
-        if (!ptr)
-            return h.allocate(new_size);
-
-        auto* a = allocation_header(ptr);
-        VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
-        VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
-
-        size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE - sizeof(AllocationHeader);
-
-        if (old_size == new_size)
-            return ptr;
-
-        auto* new_ptr = h.allocate(new_size);
-        if (new_ptr) {
-            __builtin_memcpy(new_ptr, ptr, min(old_size, new_size));
-            deallocate(ptr);
-        }
-        return new_ptr;
-    }
-
-    void* reallocate(void* ptr, size_t new_size)
-    {
-        return reallocate(ptr, new_size, *this);
-    }
-
     bool contains(const void* ptr) const
     {
         const auto* a = allocation_header(ptr);
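Note the OOM contract of the removed Heap::reallocate() above: when the new allocation fails it returns nullptr and the original allocation is left intact, because deallocate() only runs inside the if (new_ptr) branch. A hypothetical caller therefore had to check the result before adopting it, roughly:

    // Illustrative only; not code from this commit.
    void* grown = heap.reallocate(old_ptr, new_size);
    if (!grown) {
        // OOM: old_ptr is still valid and still owned by the caller.
    } else {
        old_ptr = grown;
    }
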
@@ -319,17 +291,6 @@ public:
         VERIFY_NOT_REACHED();
     }

-    void* reallocate(void* ptr, size_t new_size)
-    {
-        if (!ptr)
-            return allocate(new_size);
-        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
-            if (subheap->heap.contains(ptr))
-                return subheap->heap.reallocate(ptr, new_size, *this);
-        }
-        VERIFY_NOT_REACHED();
-    }
-
     HeapType& add_subheap(void* memory, size_t memory_size)
     {
         VERIFY(memory_size > sizeof(SubHeap));
@@ -298,13 +298,6 @@ void kfree(void* ptr)
     --g_nested_kfree_calls;
 }

-void* krealloc(void* ptr, size_t new_size)
-{
-    kmalloc_verify_nospinlock_held();
-    ScopedSpinLock lock(s_lock);
-    return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
-}
-
 size_t kmalloc_good_size(size_t size)
 {
     return size;
@@ -40,7 +40,6 @@ void kmalloc_init();
 [[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_impl(size_t);
 [[gnu::malloc, gnu::returns_nonnull, gnu::alloc_size(1)]] void* kmalloc_eternal(size_t);

-void* krealloc(void*, size_t);
 void kfree(void*);
 void kfree_sized(void*, size_t);