
LibSQL: Keep track of free heap blocks when trimming storage

When overwriting existing heap storage that requires fewer blocks, make
sure to free all remaining blocks so they can be reused in the future.
Jelle Raaijmakers 2023-05-24 14:53:43 +02:00 committed by Tim Flynn
parent c5ebc4bb40
commit a6abc1697f
3 changed files with 75 additions and 6 deletions
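The change boils down to keeping an in-memory free list of block indices. As an illustration only, here is a minimal sketch of that idea in standard C++; the BlockAllocator class, its std::vector storage and the starting index are assumptions made for this example, not the actual AK-based SerenityOS code shown in the diff below.

// Standalone sketch of the free-list scheme: freed block indices are
// remembered and handed out again before the heap file is grown.
#include <cstdint>
#include <vector>

using BlockIndex = std::uint32_t;

class BlockAllocator {
public:
    // Prefer recycling a previously freed index; otherwise append a new block.
    BlockIndex request_new_block_index()
    {
        if (!m_free_block_indices.empty()) {
            auto index = m_free_block_indices.back();
            m_free_block_indices.pop_back();
            return index;
        }
        return m_next_block++;
    }

    // Record an index as reusable. The real commit additionally zeroes the
    // on-disk block so free blocks can be rediscovered by a scan later.
    void free_block(BlockIndex index) { m_free_block_indices.push_back(index); }

private:
    std::vector<BlockIndex> m_free_block_indices;
    BlockIndex m_next_block { 1 }; // index 0 is treated as "no block" in this sketch
};

With this scheme, after free_block(7) the next call to request_new_block_index() returns 7 again instead of extending the file.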

@@ -80,7 +80,15 @@ ErrorOr<size_t> Heap::file_size_in_bytes() const
 
 bool Heap::has_block(Block::Index index) const
 {
-    return index <= m_highest_block_written || m_write_ahead_log.contains(index);
+    return (index <= m_highest_block_written || m_write_ahead_log.contains(index))
+        && !m_free_block_indices.contains_slow(index);
 }
 
+Block::Index Heap::request_new_block_index()
+{
+    if (!m_free_block_indices.is_empty())
+        return m_free_block_indices.take_last();
+    return m_next_block++;
+}
+
 ErrorOr<ByteBuffer> Heap::read_storage(Block::Index index)
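Taken together, has_block() now treats a freed-but-not-yet-reused index as absent, and request_new_block_index() hands out a recycled index before growing the heap via m_next_block.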
@@ -107,21 +115,23 @@ ErrorOr<void> Heap::write_storage(Block::Index index, ReadonlyBytes data)
     // Split up the storage across multiple blocks if necessary, creating a chain
     u32 remaining_size = static_cast<u32>(data.size());
     u32 offset_in_data = 0;
+    Block::Index existing_next_block_index = 0;
     while (remaining_size > 0) {
         auto block_data_size = AK::min(remaining_size, Block::DATA_SIZE);
         remaining_size -= block_data_size;
 
         ByteBuffer block_data;
-        Block::Index next_block_index = 0;
         if (has_block(index)) {
             auto existing_block = TRY(read_block(index));
             block_data = existing_block.data();
             TRY(block_data.try_resize(block_data_size));
-            next_block_index = existing_block.next_block();
+            existing_next_block_index = existing_block.next_block();
         } else {
             block_data = TRY(ByteBuffer::create_uninitialized(block_data_size));
+            existing_next_block_index = 0;
         }
 
+        Block::Index next_block_index = existing_next_block_index;
         if (next_block_index == 0 && remaining_size > 0)
             next_block_index = request_new_block_index();
         else if (remaining_size == 0)
@@ -133,6 +143,14 @@ ErrorOr<void> Heap::write_storage(Block::Index index, ReadonlyBytes data)
         index = next_block_index;
         offset_in_data += block_data_size;
     }
 
+    // Free remaining blocks in existing chain, if any
+    while (existing_next_block_index > 0) {
+        auto existing_block = TRY(read_block(existing_next_block_index));
+        existing_next_block_index = existing_block.next_block();
+        TRY(free_block(existing_block));
+    }
+
     return {};
 }
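As a worked example with hypothetical indices: suppose a value previously spanned the chain 5 → 9 → 12 and its replacement fits into block 5 alone. The write loop rewrites block 5 and exits with existing_next_block_index == 9; the new cleanup loop then reads block 9, follows its next pointer to block 12, and frees both, so indices 9 and 12 land in m_free_block_indices and will be handed out again by request_new_block_index().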
@@ -207,13 +225,28 @@ ErrorOr<void> Heap::write_block(Block const& block)
     return write_raw_block_to_wal(block.index(), move(heap_data));
 }
 
+ErrorOr<void> Heap::free_block(Block const& block)
+{
+    auto index = block.index();
+    dbgln_if(SQL_DEBUG, "{}({})", __FUNCTION__, index);
+    VERIFY(index > 0);
+    VERIFY(has_block(index));
+
+    // Zero out freed blocks to facilitate a free block scan upon opening the database later
+    auto zeroed_data = TRY(ByteBuffer::create_zeroed(Block::SIZE));
+    TRY(write_raw_block_to_wal(index, move(zeroed_data)));
+
+    return m_free_block_indices.try_append(index);
+}
+
 ErrorOr<void> Heap::flush()
 {
     VERIFY(m_file);
     auto indices = m_write_ahead_log.keys();
     quick_sort(indices);
     for (auto index : indices) {
-        dbgln_if(SQL_DEBUG, "Flushing block {} to {}", index, name());
+        dbgln_if(SQL_DEBUG, "Flushing block {}", index);
         auto& data = m_write_ahead_log.get(index).value();
         TRY(write_raw_block(index, data));
     }
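Two details of free_block() stand out. The freed block is zeroed through the write-ahead log (write_raw_block_to_wal) rather than by touching the file directly, so freeing follows the same durability path as ordinary block writes. And because m_free_block_indices exists only in memory, the zeroing is what makes it possible, as the in-code comment notes, to rebuild the free list with a scan for all-zero blocks when the database is opened again later.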