From a61f6ccc2757b3b7711df5fd7374ae0fc0ac55d6 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Mon, 30 Sep 2019 10:31:06 +0200
Subject: [PATCH] Kernel: Implement a simpler, bigger cache for DiskBackedFS

The hashmap cache was ridiculously slow and hurt us more than it helped
us. This patch replaces it with a flat memory cache that keeps up to
10'000 blocks in cache with a simple dirty bit.

The syncd task will wake up periodically and call flush_writes() on all
file systems, which now causes us to traverse the cache and write all
dirty blocks to disk.

There's a ton of room for improvement here, but this alone is already
drastically better when doing repeated GCC invocations.
---
 Kernel/FileSystem/DiskBackedFileSystem.cpp | 165 ++++++++++++---------
 Kernel/FileSystem/DiskBackedFileSystem.h   |   6 +-
 2 files changed, 97 insertions(+), 74 deletions(-)

diff --git a/Kernel/FileSystem/DiskBackedFileSystem.cpp b/Kernel/FileSystem/DiskBackedFileSystem.cpp
index 59d51ff4b4..85b791adb2 100644
--- a/Kernel/FileSystem/DiskBackedFileSystem.cpp
+++ b/Kernel/FileSystem/DiskBackedFileSystem.cpp
@@ -1,50 +1,77 @@
-#include "DiskBackedFileSystem.h"
-#include <AK/InlineLRUCache.h>
 #include <AK/ByteBuffer.h>
+#include <Kernel/FileSystem/DiskBackedFileSystem.h>
+#include <Kernel/KBuffer.h>
 #include <Kernel/Process.h>
 
 //#define DBFS_DEBUG
 
-struct BlockIdentifier {
-    unsigned fsid { 0 };
-    unsigned index { 0 };
-
-    bool operator==(const BlockIdentifier& other) const { return fsid == other.fsid && index == other.index; }
+struct CacheEntry {
+    u32 timestamp { 0 };
+    u32 block_index { 0 };
+    u8* data { nullptr };
+    bool has_data { false };
+    bool is_dirty { false };
 };
 
-namespace AK {
-
-template<>
-struct Traits<BlockIdentifier> : public GenericTraits<BlockIdentifier> {
-    static unsigned hash(const BlockIdentifier& block_id) { return pair_int_hash(block_id.fsid, block_id.index); }
-    static void dump(const BlockIdentifier& block_id) { kprintf("[block %02u:%08u]", block_id.fsid, block_id.index); }
-};
-
-}
-
-class CachedBlock : public InlineLinkedListNode<CachedBlock> {
+class DiskCache {
 public:
-    CachedBlock(const BlockIdentifier& block_id, const ByteBuffer& buffer)
-        : m_key(block_id)
-        , m_buffer(buffer)
+    explicit DiskCache(size_t block_size)
+        : m_cached_block_data(KBuffer::create_with_size(m_entry_count * block_size))
     {
+        m_entries = (CacheEntry*)kmalloc_eternal(m_entry_count * sizeof(CacheEntry));
+        for (size_t i = 0; i < m_entry_count; ++i) {
+            m_entries[i].data = m_cached_block_data.data() + i * block_size;
+        }
     }
 
-    BlockIdentifier m_key;
-    CachedBlock* m_next { nullptr };
-    CachedBlock* m_prev { nullptr };
+    ~DiskCache() {}
 
-    ByteBuffer m_buffer;
+    bool is_dirty() const { return m_dirty; }
+    void set_dirty(bool b) { m_dirty = b; }
+
+    CacheEntry& get(u32 block_index) const
+    {
+        auto now = kgettimeofday().tv_sec;
+
+        CacheEntry* oldest_clean_entry = nullptr;
+        for (size_t i = 0; i < m_entry_count; ++i) {
+            auto& entry = m_entries[i];
+            if (entry.block_index == block_index) {
+                entry.timestamp = now;
+                return entry;
+            }
+            if (!entry.is_dirty) {
+                if (!oldest_clean_entry)
+                    oldest_clean_entry = &entry;
+                else if (entry.timestamp < oldest_clean_entry->timestamp)
+                    oldest_clean_entry = &entry;
+            }
+        }
+        // FIXME: What if every single entry was dirty though :(
+        ASSERT(oldest_clean_entry);
+
+        // Replace the oldest clean entry.
+        auto& new_entry = *oldest_clean_entry;
+        new_entry.timestamp = now;
+        new_entry.block_index = block_index;
+        new_entry.has_data = false;
+        new_entry.is_dirty = false;
+        return new_entry;
+    }
+
+    template<typename Callback>
+    void for_each_entry(Callback callback)
+    {
+        for (size_t i = 0; i < m_entry_count; ++i)
+            callback(m_entries[i]);
+    }
+
+    size_t m_entry_count { 10000 };
+    KBuffer m_cached_block_data;
+    CacheEntry* m_entries { nullptr };
+    bool m_dirty { false };
 };
 
-Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>& block_cache()
-{
-    static Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>* s_cache;
-    if (!s_cache)
-        s_cache = new Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>;
-    return *s_cache;
-}
-
 DiskBackedFS::DiskBackedFS(NonnullRefPtr<DiskDevice>&& device)
     : m_device(move(device))
 {
@@ -61,18 +88,12 @@ bool DiskBackedFS::write_block(unsigned index, const ByteBuffer& data)
 #endif
     ASSERT(data.size() == block_size());
 
-    {
-        LOCKER(block_cache().lock());
-        if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
-            cached_block->m_buffer = data;
-    }
-
-    LOCKER(m_lock);
-    m_write_cache.set(index, data.isolated_copy());
-
-    if (m_write_cache.size() >= 32)
-        flush_writes();
+    auto& entry = cache().get(index);
+    memcpy(entry.data, data.data(), data.size());
+    entry.is_dirty = true;
+    entry.has_data = true;
+    cache().set_dirty(true);
 
     return true;
 }
@@ -92,31 +113,14 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
     kprintf("DiskBackedFileSystem::read_block %u\n", index);
 #endif
 
-    {
-        LOCKER(m_lock);
-        if (auto it = m_write_cache.find(index); it != m_write_cache.end())
-            return it->value;
+    auto& entry = cache().get(index);
+    if (!entry.has_data) {
+        DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
+        bool success = device().read(base_offset, block_size(), entry.data);
+        entry.has_data = true;
+        ASSERT(success);
     }
-
-    {
-        LOCKER(block_cache().lock());
-        if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
-            return cached_block->m_buffer;
-    }
-
-    auto buffer = ByteBuffer::create_uninitialized(block_size());
-    //kprintf("created block buffer with size %u\n", block_size());
-    DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
-    auto* buffer_pointer = buffer.data();
-    bool success = device().read(base_offset, block_size(), buffer_pointer);
-    ASSERT(success);
-    ASSERT(buffer.size() == block_size());
-
-    {
-        LOCKER(block_cache().lock());
-        block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer));
-    }
-    return buffer;
+    return ByteBuffer::copy(entry.data, block_size());
 }
 
 ByteBuffer DiskBackedFS::read_blocks(unsigned index, unsigned count) const
@@ -142,9 +146,24 @@ void DiskBackedFS::flush_writes()
 {
     LOCKER(m_lock);
-    for (auto& it : m_write_cache) {
-        DiskOffset base_offset = static_cast<DiskOffset>(it.key) * static_cast<DiskOffset>(block_size());
-        device().write(base_offset, block_size(), it.value.data());
-    }
-    m_write_cache.clear();
+    if (!cache().is_dirty())
+        return;
+    u32 count = 0;
+    cache().for_each_entry([&](CacheEntry& entry) {
+        if (!entry.is_dirty)
+            return;
+        DiskOffset base_offset = static_cast<DiskOffset>(entry.block_index) * static_cast<DiskOffset>(block_size());
+        device().write(base_offset, block_size(), entry.data);
+        ++count;
+        entry.is_dirty = false;
+    });
+    cache().set_dirty(false);
+    dbg() << class_name() << ": " << "Flushed " << count << " blocks to disk";
+}
+
+DiskCache& DiskBackedFS::cache() const
+{
+    if (!m_cache)
+        m_cache = make<DiskCache>(block_size());
+    return *m_cache;
 }
 
diff --git a/Kernel/FileSystem/DiskBackedFileSystem.h b/Kernel/FileSystem/DiskBackedFileSystem.h
index 1b6ee8189c..fff5b11945 100644
--- a/Kernel/FileSystem/DiskBackedFileSystem.h
+++ b/Kernel/FileSystem/DiskBackedFileSystem.h
@@ -3,6 +3,8 @@
 #include "FileSystem.h"
 #include <Kernel/Devices/DiskDevice.h>
 
+class DiskCache;
+
 class DiskBackedFS : public FS {
 public:
     virtual ~DiskBackedFS() override;
@@ -24,6 +26,8 @@ protected:
     bool write_blocks(unsigned index, unsigned count, const ByteBuffer&);
 
 private:
+    DiskCache& cache() const;
+
     NonnullRefPtr<DiskDevice> m_device;
-    HashMap<unsigned, ByteBuffer> m_write_cache;
+    mutable OwnPtr<DiskCache> m_cache;
 };
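
The commit message above describes the new design in prose; what follows is a minimal stand-alone sketch of the same policy, added here for illustration only. It models a flat array of fixed-size slots, each with a timestamp and a dirty bit: a lookup either hits an existing slot or recycles the least recently used clean slot, and a periodic flush writes dirty slots back to the device. The BlockDevice type, the std::vector backing storage (standing in for KBuffer), the logical-clock timestamps, and the flush-when-everything-is-dirty fallback (a case the patch itself leaves as a FIXME) are all assumptions made for this sketch and are not part of the patch.

// Stand-alone sketch of a flat block cache with per-slot dirty bits.
// Assumed for illustration: BlockDevice, std::vector backing storage, and a
// logical counter instead of kgettimeofday(); none of this is SerenityOS code.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct BlockDevice {
    std::vector<uint8_t> storage;
    size_t block_size { 0 };
    void read(uint32_t index, uint8_t* out) const { std::memcpy(out, &storage[index * block_size], block_size); }
    void write(uint32_t index, const uint8_t* in) { std::memcpy(&storage[index * block_size], in, block_size); }
};

class FlatBlockCache {
public:
    FlatBlockCache(BlockDevice& device, size_t entry_count)
        : m_device(device), m_entries(entry_count), m_data(entry_count * device.block_size) {}

    // Read through the cache; a miss loads the block into the chosen slot.
    const uint8_t* read(uint32_t block_index)
    {
        auto& entry = slot_for(block_index);
        if (!entry.has_data) {
            m_device.read(block_index, slot_data(entry));
            entry.has_data = true;
        }
        return slot_data(entry);
    }

    // Overwrite the cached copy and mark it dirty; the device is not touched yet.
    void write(uint32_t block_index, const uint8_t* data)
    {
        auto& entry = slot_for(block_index);
        std::memcpy(slot_data(entry), data, m_device.block_size);
        entry.has_data = true;
        entry.is_dirty = true;
    }

    // The role syncd's periodic flush_writes() plays in the patch: write back all dirty slots.
    void flush()
    {
        for (auto& entry : m_entries) {
            if (!entry.is_dirty)
                continue;
            m_device.write(entry.block_index, slot_data(entry));
            entry.is_dirty = false;
        }
    }

private:
    struct Entry {
        uint64_t timestamp { 0 };
        uint32_t block_index { 0 };
        bool has_data { false };
        bool is_dirty { false };
    };

    // Return the slot already holding block_index, or recycle the oldest clean slot.
    Entry& slot_for(uint32_t block_index)
    {
        ++m_now;
        Entry* oldest_clean = nullptr;
        for (auto& entry : m_entries) {
            if (entry.has_data && entry.block_index == block_index) {
                entry.timestamp = m_now;
                return entry;
            }
            if (!entry.is_dirty && (!oldest_clean || entry.timestamp < oldest_clean->timestamp))
                oldest_clean = &entry;
        }
        if (!oldest_clean) {
            // The patch leaves this case as a FIXME; here we simply flush so a clean slot appears.
            flush();
            return slot_for(block_index);
        }
        oldest_clean->timestamp = m_now;
        oldest_clean->block_index = block_index;
        oldest_clean->has_data = false;
        oldest_clean->is_dirty = false;
        return *oldest_clean;
    }

    uint8_t* slot_data(Entry& entry) { return m_data.data() + (&entry - m_entries.data()) * m_device.block_size; }

    BlockDevice& m_device;
    std::vector<Entry> m_entries;
    std::vector<uint8_t> m_data;
    uint64_t m_now { 0 };
};

Compared with the removed hashmap/LRU-list cache, the trade-off is a linear scan per lookup in exchange for one contiguous allocation and no per-block heap churn, and the eviction rule only ever recycles clean slots, so unwritten data is never thrown away; dirty slots stay pinned until the next flush, which is also why the patch keeps a FIXME about the case where every entry is dirty.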