1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-24 16:07:35 +00:00

LibSQL: Fix BTree corruption in TreeNode::split

After splitting a node, the new node was written to the same pointer as
the current node - probably a copy / paste error. This new code requires
a `.pointer() -> u32` to exist on the object to be serialized,
preventing this issue from happening again.

Fixes #15844.
This commit is contained in:
Jelle Raaijmakers 2022-11-26 01:17:43 +01:00 committed by Andreas Kling
parent e5e00a682b
commit 70a7bca920
7 changed files with 35 additions and 16 deletions

View file

@@ -754,4 +754,23 @@ TEST_CASE(binary_operator_failure)
expect_failure(move(result), '&'); expect_failure(move(result), '&');
} }
TEST_CASE(describe_large_table_after_persist)
{
ScopeGuard guard([]() { unlink(db_name); });
{
auto database = SQL::Database::construct(db_name);
EXPECT(!database->open().is_error());
auto result = execute(database, "CREATE TABLE Cookies ( name TEXT, value TEXT, same_site INTEGER, creation_time INTEGER, last_access_time INTEGER, expiry_time INTEGER, domain TEXT, path TEXT, secure INTEGER, http_only INTEGER, host_only INTEGER, persistent INTEGER );");
EXPECT_EQ(result.command(), SQL::SQLCommand::Create);
}
{
auto database = SQL::Database::construct(db_name);
EXPECT(!database->open().is_error());
auto result = execute(database, "DESCRIBE TABLE Cookies;");
EXPECT_EQ(result.size(), 12u);
}
}
} }

View file

@@ -56,7 +56,7 @@ TreeNode* BTree::new_root()
{ {
set_pointer(new_record_pointer()); set_pointer(new_record_pointer());
m_root = make<TreeNode>(*this, nullptr, m_root.leak_ptr(), pointer()); m_root = make<TreeNode>(*this, nullptr, m_root.leak_ptr(), pointer());
serializer().serialize_and_write(*m_root.ptr(), m_root->pointer()); serializer().serialize_and_write(*m_root.ptr());
if (on_new_root) if (on_new_root)
on_new_root(); on_new_root();
return m_root; return m_root;

View file

@@ -232,7 +232,7 @@ bool BTreeIterator::update(Key const& new_value)
// We are friend of BTree and TreeNode. Don't know how I feel about that. // We are friend of BTree and TreeNode. Don't know how I feel about that.
m_current->m_entries[m_index] = new_value; m_current->m_entries[m_index] = new_value;
m_current->tree().serializer().serialize_and_write(*m_current, m_current->pointer()); m_current->tree().serializer().serialize_and_write(*m_current);
return true; return true;
} }

View file

@@ -223,7 +223,7 @@ ErrorOr<void> Database::update(Row& tuple)
VERIFY(m_table_cache.get(tuple.table()->key().hash()).has_value()); VERIFY(m_table_cache.get(tuple.table()->key().hash()).has_value());
// TODO Check constraints // TODO Check constraints
m_serializer.reset(); m_serializer.reset();
m_serializer.serialize_and_write<Tuple>(tuple, tuple.pointer()); m_serializer.serialize_and_write<Tuple>(tuple);
// TODO update indexes defined on table. // TODO update indexes defined on table.
return {}; return {};

View file

@@ -140,7 +140,7 @@ bool HashBucket::insert(Key const& key)
return false; return false;
} }
m_entries.append(key); m_entries.append(key);
m_hash_index.serializer().serialize_and_write(*this, pointer()); m_hash_index.serializer().serialize_and_write(*this);
return true; return true;
} }
@@ -226,10 +226,10 @@ HashIndex::HashIndex(Serializer& serializer, NonnullRefPtr<TupleDescriptor> cons
} else { } else {
auto bucket = append_bucket(0u, 1u, new_record_pointer()); auto bucket = append_bucket(0u, 1u, new_record_pointer());
bucket->m_inflated = true; bucket->m_inflated = true;
serializer.serialize_and_write(*bucket, bucket->pointer()); serializer.serialize_and_write(*bucket);
bucket = append_bucket(1u, 1u, new_record_pointer()); bucket = append_bucket(1u, 1u, new_record_pointer());
bucket->m_inflated = true; bucket->m_inflated = true;
serializer.serialize_and_write(*bucket, bucket->pointer()); serializer.serialize_and_write(*bucket);
m_nodes.append(first_node); m_nodes.append(first_node);
write_directory_to_write_ahead_log(); write_directory_to_write_ahead_log();
} }
@@ -283,7 +283,7 @@ HashBucket* HashIndex::get_bucket_for_insert(Key const& key)
} }
if (moved > 0) { if (moved > 0) {
dbgln_if(SQL_DEBUG, "Moved {} entries from bucket #{} to #{}", moved, base_index, ix); dbgln_if(SQL_DEBUG, "Moved {} entries from bucket #{} to #{}", moved, base_index, ix);
serializer().serialize_and_write(*sub_bucket, sub_bucket->pointer()); serializer().serialize_and_write(*sub_bucket);
} }
total_moved += moved; total_moved += moved;
} }
@@ -292,7 +292,7 @@ HashBucket* HashIndex::get_bucket_for_insert(Key const& key)
else else
dbgln_if(SQL_DEBUG, "Nothing redistributed from bucket #{}", base_index); dbgln_if(SQL_DEBUG, "Nothing redistributed from bucket #{}", base_index);
bucket->set_local_depth(bucket->local_depth() + 1); bucket->set_local_depth(bucket->local_depth() + 1);
serializer().serialize_and_write(*bucket, bucket->pointer()); serializer().serialize_and_write(*bucket);
write_directory_to_write_ahead_log(); write_directory_to_write_ahead_log();
auto bucket_after_redistribution = get_bucket(key_hash % size()); auto bucket_after_redistribution = get_bucket(key_hash % size());
@@ -327,7 +327,7 @@ void HashIndex::write_directory_to_write_ahead_log()
size_t num_node = 0u; size_t num_node = 0u;
while (offset < size()) { while (offset < size()) {
HashDirectoryNode node(*this, num_node, offset); HashDirectoryNode node(*this, num_node, offset);
serializer().serialize_and_write(node, node.pointer()); serializer().serialize_and_write(node);
offset += node.number_of_pointers(); offset += node.number_of_pointers();
} }
} }

View file

@@ -108,12 +108,12 @@ public:
void serialize(String const&); void serialize(String const&);
template<typename T> template<typename T>
bool serialize_and_write(T const& t, u32 pointer) bool serialize_and_write(T const& t)
{ {
VERIFY(m_heap.ptr() != nullptr); VERIFY(m_heap.ptr() != nullptr);
reset(); reset();
serialize<T>(t); serialize<T>(t);
m_heap->add_to_wal(pointer, m_buffer); m_heap->add_to_wal(t.pointer(), m_buffer);
return true; return true;
} }

View file

@@ -172,7 +172,7 @@ bool TreeNode::update_key_pointer(Key const& key)
if (m_entries[ix].pointer() != key.pointer()) { if (m_entries[ix].pointer() != key.pointer()) {
m_entries[ix].set_pointer(key.pointer()); m_entries[ix].set_pointer(key.pointer());
dump_if(SQL_DEBUG, "To WAL"); dump_if(SQL_DEBUG, "To WAL");
tree().serializer().serialize_and_write<TreeNode>(*this, pointer()); tree().serializer().serialize_and_write<TreeNode>(*this);
} }
return true; return true;
} }
@@ -277,7 +277,7 @@ void TreeNode::just_insert(Key const& key, TreeNode* right)
split(); split();
} else { } else {
dump_if(SQL_DEBUG, "To WAL"); dump_if(SQL_DEBUG, "To WAL");
tree().serializer().serialize_and_write(*this, pointer()); tree().serializer().serialize_and_write(*this);
} }
return; return;
} }
@@ -289,7 +289,7 @@ void TreeNode::just_insert(Key const& key, TreeNode* right)
split(); split();
} else { } else {
dump_if(SQL_DEBUG, "To WAL"); dump_if(SQL_DEBUG, "To WAL");
tree().serializer().serialize_and_write(*this, pointer()); tree().serializer().serialize_and_write(*this);
} }
} }
@@ -327,9 +327,9 @@ void TreeNode::split()
auto median = m_entries.take_last(); auto median = m_entries.take_last();
dump_if(SQL_DEBUG, "Split Left To WAL"); dump_if(SQL_DEBUG, "Split Left To WAL");
tree().serializer().serialize_and_write(*this, pointer()); tree().serializer().serialize_and_write(*this);
new_node->dump_if(SQL_DEBUG, "Split Right to WAL"); new_node->dump_if(SQL_DEBUG, "Split Right to WAL");
tree().serializer().serialize_and_write(*new_node, pointer()); tree().serializer().serialize_and_write(*new_node);
m_up->just_insert(median, new_node); m_up->just_insert(median, new_node);
} }