
LibSQL: Make TupleDescriptor a shared pointer instead of a stack object

Tuple descriptors are basically the same for, for example, all rows in
a table. It makes sense to share them instead of copying them for every
single row.
Jan de Visser 2021-07-13 13:47:08 -04:00 committed by Andreas Kling
parent 9e225d2d05
commit a5e28f2897
17 changed files with 95 additions and 94 deletions
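
Before the file-by-file diff, a minimal sketch of the calling convention this commit moves to. The SQL:: calls mirror what the hunks below show (adopt_ref, append, and the Tuple constructor taking a NonnullRefPtr); the function name and the exact include set are illustrative, not taken from the commit:

#include <AK/NonnullRefPtr.h>
#include <LibSQL/Tuple.h>
#include <LibSQL/TupleDescriptor.h>

void build_rows_sharing_one_descriptor()
{
    // Build the descriptor once, on the heap, behind a NonnullRefPtr...
    auto descriptor = adopt_ref(*new SQL::TupleDescriptor);
    descriptor->append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
    descriptor->append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });

    // ...and hand the same object to every row: each Tuple now keeps a
    // reference to it instead of its own copy of the element vector.
    SQL::Tuple row1(descriptor);
    SQL::Tuple row2(descriptor);
    row1["col1"] = "first row";
    row2["col1"] = "second row";
}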


@@ -126,8 +126,8 @@ void insert_into_and_scan_btree(int num_keys);
NonnullRefPtr<SQL::BTree> setup_btree(SQL::Heap& heap)
{
SQL::TupleDescriptor tuple_descriptor;
tuple_descriptor.append({ "key_value", SQL::SQLType::Integer, SQL::Order::Ascending });
NonnullRefPtr<SQL::TupleDescriptor> tuple_descriptor = adopt_ref(*new SQL::TupleDescriptor);
tuple_descriptor->append({ "key_value", SQL::SQLType::Integer, SQL::Order::Ascending });
auto root_pointer = heap.user_value(0);
if (!root_pointer) {
@@ -186,6 +186,7 @@ void insert_into_and_scan_btree(int num_keys)
k.set_pointer(pointers[ix]);
btree->insert(k);
}
#ifdef LIST_TREE
btree->list_tree();
#endif


@@ -123,9 +123,9 @@ void insert_into_and_scan_hash_index(int num_keys);
NonnullRefPtr<SQL::HashIndex> setup_hash_index(SQL::Heap& heap)
{
SQL::TupleDescriptor tuple_descriptor;
tuple_descriptor.append({ "key_value", SQL::SQLType::Integer, SQL::Order::Ascending });
tuple_descriptor.append({ "text_value", SQL::SQLType::Text, SQL::Order::Ascending });
NonnullRefPtr<SQL::TupleDescriptor> tuple_descriptor = adopt_ref(*new SQL::TupleDescriptor);
tuple_descriptor->append({ "key_value", SQL::SQLType::Integer, SQL::Order::Ascending });
tuple_descriptor->append({ "text_value", SQL::SQLType::Text, SQL::Order::Ascending });
auto directory_pointer = heap.user_value(0);
if (!directory_pointer) {


@@ -135,9 +135,9 @@ TEST_CASE(order_int_values)
TEST_CASE(tuple)
{
SQL::TupleDescriptor descriptor;
descriptor.append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor.append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
NonnullRefPtr<SQL::TupleDescriptor> descriptor = adopt_ref(*new SQL::TupleDescriptor);
descriptor->append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor->append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
SQL::Tuple tuple(descriptor);
tuple["col1"] = "Test";
@@ -148,9 +148,9 @@ TEST_CASE(tuple)
TEST_CASE(serialize_tuple)
{
SQL::TupleDescriptor descriptor;
descriptor.append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor.append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
NonnullRefPtr<SQL::TupleDescriptor> descriptor = adopt_ref(*new SQL::TupleDescriptor);
descriptor->append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor->append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
SQL::Tuple tuple(descriptor);
tuple["col1"] = "Test";
@@ -169,9 +169,9 @@ TEST_CASE(serialize_tuple)
TEST_CASE(copy_tuple)
{
SQL::TupleDescriptor descriptor;
descriptor.append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor.append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
NonnullRefPtr<SQL::TupleDescriptor> descriptor = adopt_ref(*new SQL::TupleDescriptor);
descriptor->append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor->append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
SQL::Tuple tuple(descriptor);
tuple["col1"] = "Test";
@@ -187,9 +187,9 @@ TEST_CASE(copy_tuple)
TEST_CASE(compare_tuples)
{
SQL::TupleDescriptor descriptor;
descriptor.append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor.append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
NonnullRefPtr<SQL::TupleDescriptor> descriptor = adopt_ref(*new SQL::TupleDescriptor);
descriptor->append({ "col1", SQL::SQLType::Text, SQL::Order::Ascending });
descriptor->append({ "col2", SQL::SQLType::Integer, SQL::Order::Descending });
SQL::Tuple tuple1(descriptor);
tuple1["col1"] = "Test";


@@ -10,13 +10,13 @@
namespace SQL {
BTree::BTree(Heap& heap, TupleDescriptor const& descriptor, bool unique, u32 pointer)
BTree::BTree(Heap& heap, NonnullRefPtr<TupleDescriptor> const& descriptor, bool unique, u32 pointer)
: Index(heap, descriptor, unique, pointer)
, m_root(nullptr)
{
}
BTree::BTree(Heap& heap, TupleDescriptor const& descriptor, u32 pointer)
BTree::BTree(Heap& heap, NonnullRefPtr<TupleDescriptor> const& descriptor, u32 pointer)
: BTree(heap, descriptor, true, pointer)
{
}


@@ -111,8 +111,8 @@ public:
Function<void(void)> on_new_root;
private:
BTree(Heap& heap, TupleDescriptor const&, bool unique, u32 pointer);
BTree(Heap& heap, TupleDescriptor const&, u32 pointer);
BTree(Heap& heap, NonnullRefPtr<TupleDescriptor> const&, bool unique, u32 pointer);
BTree(Heap& heap, NonnullRefPtr<TupleDescriptor> const&, u32 pointer);
void initialize_root();
TreeNode* new_root();
OwnPtr<TreeNode> m_root { nullptr };


@@ -85,7 +85,7 @@ void HashBucket::serialize(ByteBuffer& buffer) const
{
dbgln_if(SQL_DEBUG, "Serializing bucket: pointer {}, index #{}, local depth {} size {}",
pointer(), index(), local_depth(), size());
dbgln_if(SQL_DEBUG, "key_length: {} max_entries: {}", m_hash_index.descriptor().data_length(), max_entries_in_bucket());
dbgln_if(SQL_DEBUG, "key_length: {} max_entries: {}", m_hash_index.descriptor()->data_length(), max_entries_in_bucket());
serialize_to(buffer, local_depth());
serialize_to(buffer, size());
dbgln_if(SQL_DEBUG, "buffer size after prolog {}", buffer.size());
@@ -117,7 +117,7 @@ void HashBucket::inflate()
size_t HashBucket::max_entries_in_bucket() const
{
auto key_size = m_hash_index.descriptor().data_length() + sizeof(u32);
auto key_size = m_hash_index.descriptor()->data_length() + sizeof(u32);
return (BLOCKSIZE - 2 * sizeof(u32)) / key_size;
}
@@ -194,7 +194,7 @@ void HashBucket::list_bucket()
}
}
HashIndex::HashIndex(Heap& heap, TupleDescriptor const& descriptor, u32 first_node)
HashIndex::HashIndex(Heap& heap, NonnullRefPtr<TupleDescriptor> const& descriptor, u32 first_node)
: Index(heap, descriptor, true, first_node)
, m_nodes()
, m_buckets()


@@ -88,7 +88,7 @@ public:
void list_hash();
private:
HashIndex(Heap&, TupleDescriptor const&, u32);
HashIndex(Heap&, NonnullRefPtr<TupleDescriptor> const&, u32);
void expand();
void write_directory_to_write_ahead_log();


@@ -10,7 +10,7 @@
namespace SQL {
Index::Index(Heap& heap, TupleDescriptor const& descriptor, bool unique, u32 pointer)
Index::Index(Heap& heap, NonnullRefPtr<TupleDescriptor> const& descriptor, bool unique, u32 pointer)
: m_heap(heap)
, m_descriptor(descriptor)
, m_unique(unique)
@@ -18,7 +18,7 @@ Index::Index(Heap& heap, TupleDescriptor const& descriptor, bool unique, u32 poi
{
}
Index::Index(Heap& heap, TupleDescriptor const& descriptor, u32 pointer)
Index::Index(Heap& heap, NonnullRefPtr<TupleDescriptor> const& descriptor, u32 pointer)
: m_heap(heap)
, m_descriptor(descriptor)
, m_pointer(pointer)


@@ -37,14 +37,14 @@ class Index : public Core::Object {
public:
~Index() override = default;
TupleDescriptor descriptor() const { return m_descriptor; }
NonnullRefPtr<TupleDescriptor> descriptor() const { return m_descriptor; }
[[nodiscard]] bool duplicates_allowed() const { return !m_unique; }
[[nodiscard]] bool unique() const { return m_unique; }
[[nodiscard]] u32 pointer() const { return m_pointer; }
protected:
Index(Heap& heap, TupleDescriptor const&, bool unique, u32 pointer);
Index(Heap& heap, TupleDescriptor const&, u32 pointer);
Index(Heap& heap, NonnullRefPtr<TupleDescriptor> const&, bool unique, u32 pointer);
Index(Heap& heap, NonnullRefPtr<TupleDescriptor> const&, u32 pointer);
[[nodiscard]] Heap const& heap() const { return m_heap; }
[[nodiscard]] Heap& heap() { return m_heap; }
@@ -55,7 +55,7 @@ protected:
private:
Heap& m_heap;
TupleDescriptor m_descriptor;
NonnullRefPtr<TupleDescriptor> m_descriptor;
bool m_unique { false };
u32 m_pointer { 0 };
};


@@ -9,18 +9,23 @@
namespace SQL {
Key::Key(TupleDescriptor const& descriptor)
Key::Key()
: Tuple()
{
}
Key::Key(NonnullRefPtr<TupleDescriptor> const& descriptor)
: Tuple(descriptor)
{
}
Key::Key(RefPtr<IndexDef> index)
Key::Key(NonnullRefPtr<IndexDef> index)
: Tuple(index->to_tuple_descriptor())
, m_index(index)
{
}
Key::Key(TupleDescriptor const& descriptor, ByteBuffer& buffer, size_t& offset)
Key::Key(NonnullRefPtr<TupleDescriptor> const& descriptor, ByteBuffer& buffer, size_t& offset)
: Tuple(descriptor, buffer, offset)
{
}


@@ -14,16 +14,17 @@ namespace SQL {
class Key : public Tuple {
public:
Key() = default;
explicit Key(TupleDescriptor const&);
explicit Key(RefPtr<IndexDef>);
Key(TupleDescriptor const&, ByteBuffer&, size_t& offset);
Key();
explicit Key(NonnullRefPtr<TupleDescriptor> const&);
explicit Key(NonnullRefPtr<IndexDef>);
Key(NonnullRefPtr<TupleDescriptor> const&, ByteBuffer&, size_t& offset);
Key(RefPtr<IndexDef>, ByteBuffer&, size_t& offset);
Key(Key const&) = default;
RefPtr<IndexDef> index() const { return m_index; }
[[nodiscard]] virtual size_t data_length() const override { return Tuple::data_length() + sizeof(u32); }
private:
RefPtr<IndexDef> m_index;
RefPtr<IndexDef> m_index { nullptr };
};
}


@@ -102,11 +102,11 @@ void IndexDef::append_column(String name, SQLType sql_type, Order sort_order)
m_key_definition.append(part);
}
TupleDescriptor IndexDef::to_tuple_descriptor() const
NonnullRefPtr<TupleDescriptor> IndexDef::to_tuple_descriptor() const
{
TupleDescriptor ret;
NonnullRefPtr<TupleDescriptor> ret = adopt_ref(*new TupleDescriptor);
for (auto& part : m_key_definition) {
ret.append({ part.name(), part.type(), part.sort_order() });
ret->append({ part.name(), part.type(), part.sort_order() });
}
return ret;
}
@@ -145,11 +145,11 @@ TableDef::TableDef(SchemaDef* schema, String name)
{
}
TupleDescriptor TableDef::to_tuple_descriptor() const
NonnullRefPtr<TupleDescriptor> TableDef::to_tuple_descriptor() const
{
TupleDescriptor ret;
NonnullRefPtr<TupleDescriptor> ret = adopt_ref(*new TupleDescriptor);
for (auto& part : m_columns) {
ret.append({ part.name(), part.type(), Order::Ascending });
ret->append({ part.name(), part.type(), Order::Ascending });
}
return ret;
}


@@ -107,7 +107,7 @@ public:
[[nodiscard]] size_t size() const { return m_key_definition.size(); }
void append_column(String, SQLType, Order = Order::Ascending);
Key key() const override;
[[nodiscard]] TupleDescriptor to_tuple_descriptor() const;
[[nodiscard]] NonnullRefPtr<TupleDescriptor> to_tuple_descriptor() const;
static NonnullRefPtr<IndexDef> index_def();
static Key make_key(TableDef const& table_def);
@@ -132,7 +132,7 @@ public:
size_t num_indexes() { return m_indexes.size(); }
NonnullRefPtrVector<ColumnDef> columns() const { return m_columns; }
NonnullRefPtrVector<IndexDef> indexes() const { return m_indexes; }
[[nodiscard]] TupleDescriptor to_tuple_descriptor() const;
[[nodiscard]] NonnullRefPtr<TupleDescriptor> to_tuple_descriptor() const;
static NonnullRefPtr<IndexDef> index_def();
static Key make_key(SchemaDef const& schema_def);


@@ -182,7 +182,7 @@ bool TreeNode::insert_in_leaf(Key const& key)
size_t TreeNode::max_keys_in_node()
{
auto descriptor = m_tree.descriptor();
auto key_size = descriptor.data_length() + sizeof(u32);
auto key_size = descriptor->data_length() + sizeof(u32);
auto ret = (BLOCKSIZE - 2 * sizeof(u32)) / key_size;
if ((ret % 2) == 0)
--ret;


@@ -16,28 +16,28 @@
namespace SQL {
Tuple::Tuple()
: m_descriptor()
: m_descriptor(adopt_ref(*new TupleDescriptor))
, m_data()
{
}
Tuple::Tuple(TupleDescriptor const& descriptor, u32 pointer)
Tuple::Tuple(NonnullRefPtr<TupleDescriptor> const& descriptor, u32 pointer)
: m_descriptor(descriptor)
, m_data()
, m_pointer(pointer)
{
for (auto& element : descriptor) {
m_data.append(Value(element.type));
for (auto& element : *descriptor) {
m_data.empend(element.type);
}
}
Tuple::Tuple(TupleDescriptor const& descriptor, ByteBuffer& buffer, size_t& offset)
Tuple::Tuple(NonnullRefPtr<TupleDescriptor> const& descriptor, ByteBuffer& buffer, size_t& offset)
: Tuple(descriptor)
{
deserialize(buffer, offset);
}
Tuple::Tuple(TupleDescriptor const& descriptor, ByteBuffer& buffer)
Tuple::Tuple(NonnullRefPtr<TupleDescriptor> const& descriptor, ByteBuffer& buffer)
: Tuple(descriptor)
{
size_t offset = 0;
@@ -50,22 +50,22 @@ void Tuple::deserialize(ByteBuffer& buffer, size_t& offset)
deserialize_from<u32>(buffer, offset, m_pointer);
dbgln_if(SQL_DEBUG, "pointer: {}", m_pointer);
m_data.clear();
for (auto& part : m_descriptor) {
m_data.append(Value(part.type, buffer, offset));
dbgln_if(SQL_DEBUG, "Deserialized element {} = {}", part.name, m_data.last().to_string().value());
for (auto& part : *m_descriptor) {
m_data.empend(part.type, buffer, offset);
dbgln_if(SQL_DEBUG, "Deserialized element {} = {}", part.name, m_data.last().to_string());
}
}
void Tuple::serialize(ByteBuffer& buffer) const
{
VERIFY(m_descriptor.size() == m_data.size());
VERIFY(m_descriptor->size() == m_data.size());
dbgln_if(SQL_DEBUG, "Serializing tuple pointer {}", pointer());
serialize_to<u32>(buffer, pointer());
for (auto ix = 0u; ix < m_descriptor.size(); ix++) {
for (auto ix = 0u; ix < m_descriptor->size(); ix++) {
auto& key_part = m_data[ix];
if constexpr (SQL_DEBUG) {
auto str_opt = key_part.to_string();
auto& key_part_definition = m_descriptor[ix];
auto& key_part_definition = (*m_descriptor)[ix];
dbgln("Serialized part {} = {}", key_part_definition.name, (str_opt.has_value()) ? str_opt.value() : "(null)");
}
key_part.serialize(buffer);
@@ -73,7 +73,7 @@ void Tuple::serialize(ByteBuffer& buffer) const
}
Tuple::Tuple(Tuple const& other)
: m_descriptor()
: m_descriptor(other.m_descriptor)
, m_data()
{
copy_from(other);
@@ -90,8 +90,8 @@ Tuple& Tuple::operator=(Tuple const& other)
Optional<size_t> Tuple::index_of(String name) const
{
auto n = move(name);
for (auto ix = 0u; ix < m_descriptor.size(); ix++) {
auto& part = m_descriptor[ix];
for (auto ix = 0u; ix < m_descriptor->size(); ix++) {
auto& part = (*m_descriptor)[ix];
if (part.name == n) {
return (int)ix;
}
@@ -127,7 +127,7 @@ Value& Tuple::operator[](String const& name)
void Tuple::append(const Value& value)
{
VERIFY(m_descriptor.size() == 0);
VERIFY(m_descriptor->size() == 0);
m_data.append(value);
}
@@ -139,15 +139,15 @@ Tuple& Tuple::operator+=(Value const& value)
bool Tuple::is_compatible(Tuple const& other) const
{
if ((m_descriptor.size() == 0) && (other.m_descriptor.size() == 0)) {
if ((m_descriptor->size() == 0) && (other.m_descriptor->size() == 0)) {
return true;
}
if (m_descriptor.size() != other.m_descriptor.size()) {
if (m_descriptor->size() != other.m_descriptor->size()) {
return false;
}
for (auto ix = 0u; ix < m_descriptor.size(); ix++) {
auto& my_part = m_descriptor[ix];
auto& other_part = other.m_descriptor[ix];
for (auto ix = 0u; ix < m_descriptor->size(); ix++) {
auto& my_part = (*m_descriptor)[ix];
auto& other_part = (*other.m_descriptor)[ix];
if (my_part.type != other_part.type) {
return false;
}
@@ -183,20 +183,13 @@ Vector<String> Tuple::to_string_vector() const
return ret;
}
size_t Tuple::size() const
{
size_t sz = sizeof(u32);
for (auto& part : m_data) {
sz += part.size();
}
return sz;
}
void Tuple::copy_from(const Tuple& other)
{
m_descriptor.clear();
for (TupleElement const& part : other.m_descriptor) {
m_descriptor.append(part);
if (*m_descriptor != *other.m_descriptor) {
m_descriptor->clear();
for (TupleElement const& part : *other.m_descriptor) {
m_descriptor->append(part);
}
}
m_data.clear();
for (auto& part : other.m_data) {
@@ -212,7 +205,7 @@ int Tuple::compare(const Tuple& other) const
for (auto ix = 0u; ix < num_values; ix++) {
auto ret = m_data[ix].compare(other.m_data[ix]);
if (ret != 0) {
if ((ix < m_descriptor.size()) && m_descriptor[ix].order == Order::Descending)
if ((ix < m_descriptor->size()) && (*m_descriptor)[ix].order == Order::Descending)
ret = -ret;
return ret;
}
@@ -223,7 +216,7 @@ int Tuple::compare(const Tuple& other) const
int Tuple::match(const Tuple& other) const
{
auto other_index = 0u;
for (auto& part : other.descriptor()) {
for (auto& part : *other.descriptor()) {
auto other_value = other[other_index];
if (other_value.is_null())
return 0;
@@ -232,7 +225,7 @@ int Tuple::match(const Tuple& other) const
return -1;
auto ret = m_data[my_index.value()].compare(other_value);
if (ret != 0)
return (m_descriptor[my_index.value()].order == Order::Descending) ? -ret : ret;
return ((*m_descriptor)[my_index.value()].order == Order::Descending) ? -ret : ret;
other_index++;
}
return 0;


@@ -28,9 +28,9 @@ namespace SQL {
class Tuple {
public:
Tuple();
explicit Tuple(TupleDescriptor const&, u32 pointer = 0);
Tuple(TupleDescriptor const&, ByteBuffer&, size_t&);
Tuple(TupleDescriptor const&, ByteBuffer&);
explicit Tuple(NonnullRefPtr<TupleDescriptor> const&, u32 pointer = 0);
Tuple(NonnullRefPtr<TupleDescriptor> const&, ByteBuffer&, size_t&);
Tuple(NonnullRefPtr<TupleDescriptor> const&, ByteBuffer&);
Tuple(Tuple const&);
virtual ~Tuple() = default;
@@ -61,14 +61,14 @@ public:
[[nodiscard]] u32 pointer() const { return m_pointer; }
void set_pointer(u32 ptr) { m_pointer = ptr; }
[[nodiscard]] size_t size() const;
[[nodiscard]] size_t length() const { return m_descriptor.size(); }
[[nodiscard]] TupleDescriptor descriptor() const { return m_descriptor; }
[[nodiscard]] size_t size() const { return m_data.size(); }
[[nodiscard]] size_t length() const { return m_descriptor->size(); }
[[nodiscard]] NonnullRefPtr<TupleDescriptor> descriptor() const { return m_descriptor; }
[[nodiscard]] int compare(Tuple const&) const;
[[nodiscard]] int match(Tuple const&) const;
[[nodiscard]] u32 hash() const;
virtual void serialize(ByteBuffer&) const;
[[nodiscard]] virtual size_t data_length() const { return descriptor().data_length(); }
[[nodiscard]] virtual size_t data_length() const { return descriptor()->data_length(); }
protected:
[[nodiscard]] Optional<size_t> index_of(String) const;
@@ -76,7 +76,7 @@ protected:
void deserialize(ByteBuffer&, size_t&);
private:
TupleDescriptor m_descriptor;
NonnullRefPtr<TupleDescriptor> m_descriptor;
Vector<Value> m_data;
u32 m_pointer { 0 };
};
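
Worth making explicit what the header change above buys: descriptor() now returns another NonnullRefPtr to the same underlying TupleDescriptor, which (as the last hunk below shows) is now RefCounted, so handing the descriptor around is a reference-count bump rather than a copy of the element vector. A minimal standalone sketch of that AK pattern, with illustrative names that are not part of the commit:

#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/Vector.h>

struct Element {
    int type { 0 };
};

// Same shape as the new TupleDescriptor: a Vector that is also RefCounted.
class Descriptor
    : public Vector<Element>
    , public RefCounted<Descriptor> {
};

int main()
{
    auto original = adopt_ref(*new Descriptor); // heap allocation, ref count 1
    original->append({ 1 });

    NonnullRefPtr<Descriptor> shared = original; // ref count 2, elements are not copied
    return shared->size() == original->size() ? 0 : 1;
}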


@@ -19,10 +19,11 @@ struct TupleElement {
bool operator==(TupleElement const&) const = default;
};
class TupleDescriptor : public Vector<TupleElement> {
class TupleDescriptor
: public Vector<TupleElement>
, public RefCounted<TupleDescriptor> {
public:
TupleDescriptor() = default;
TupleDescriptor(TupleDescriptor const&) = default;
~TupleDescriptor() = default;
[[nodiscard]] size_t data_length() const