
Kernel: Implement a simpler, bigger cache for DiskBackedFS

The hashmap cache was ridiculously slow and hurt us more than it helped
us. This patch replaces it with a flat memory cache that keeps up to
10'000 blocks in cache with a simple dirty bit.
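
(For scale: assuming the 1 KiB block size SerenityOS's ext2 images
typically use, which is an assumption rather than something this patch
states, 10'000 cached blocks come to 10'000 × 1024 bytes, roughly 10 MB
of block data per file system, plus a flat array of per-entry metadata.)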

The syncd task will wake up periodically and call flush_writes() on all
file systems, which now causes us to traverse the cache and write all
dirty blocks to disk.
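
The flushing side lives outside this diff. As a rough sketch, the kind
of kernel loop the paragraph above describes might look like this (the
"syncd" name comes from this message; VFS::the().sync() and the
one-second sleep are assumptions for illustration, not part of this
patch):

    // Hypothetical shape of the syncd task described above; the sync
    // entry point and sleep API are assumed, not taken from this diff.
    for (;;) {
        VFS::the().sync();                    // calls flush_writes() on each FS
        current->sleep(1 * TICKS_PER_SECOND); // doze until the next pass
    }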

There's a ton of room for improvement here, but this itself is already
drastically better when doing repeated GCC invocations.
Andreas Kling 2019-09-30 10:31:06 +02:00
parent 8f45a259fc
commit a61f6ccc27
2 changed files with 97 additions and 74 deletions

Kernel/FileSystem/DiskBackedFileSystem.cpp

@@ -1,50 +1,77 @@
-#include "DiskBackedFileSystem.h"
-#include <AK/InlineLRUCache.h>
 #include <Kernel/Arch/i386/CPU.h>
+#include <Kernel/FileSystem/DiskBackedFileSystem.h>
+#include <Kernel/KBuffer.h>
 #include <Kernel/Process.h>
 
 //#define DBFS_DEBUG
 
-struct BlockIdentifier {
-    unsigned fsid { 0 };
-    unsigned index { 0 };
-
-    bool operator==(const BlockIdentifier& other) const { return fsid == other.fsid && index == other.index; }
-};
-
-namespace AK {
-template<>
-struct Traits<BlockIdentifier> : public GenericTraits<BlockIdentifier> {
-    static unsigned hash(const BlockIdentifier& block_id) { return pair_int_hash(block_id.fsid, block_id.index); }
-    static void dump(const BlockIdentifier& block_id) { kprintf("[block %02u:%08u]", block_id.fsid, block_id.index); }
-};
-}
-
-class CachedBlock : public InlineLinkedListNode<CachedBlock> {
+struct CacheEntry {
+    u32 timestamp { 0 };
+    u32 block_index { 0 };
+    u8* data { nullptr };
+    bool has_data { false };
+    bool is_dirty { false };
+};
+
+class DiskCache {
 public:
-    CachedBlock(const BlockIdentifier& block_id, const ByteBuffer& buffer)
-        : m_key(block_id)
-        , m_buffer(buffer)
+    explicit DiskCache(size_t block_size)
+        : m_cached_block_data(KBuffer::create_with_size(m_entry_count * block_size))
     {
+        m_entries = (CacheEntry*)kmalloc_eternal(m_entry_count * sizeof(CacheEntry));
+        for (size_t i = 0; i < m_entry_count; ++i) {
+            m_entries[i].data = m_cached_block_data.data() + i * block_size;
+        }
     }
 
-    BlockIdentifier m_key;
-    CachedBlock* m_next { nullptr };
-    CachedBlock* m_prev { nullptr };
+    ~DiskCache() {}
 
-    ByteBuffer m_buffer;
-};
+    bool is_dirty() const { return m_dirty; }
+    void set_dirty(bool b) { m_dirty = b; }
+
+    CacheEntry& get(u32 block_index) const
+    {
+        auto now = kgettimeofday().tv_sec;
+
+        CacheEntry* oldest_clean_entry = nullptr;
+
+        for (size_t i = 0; i < m_entry_count; ++i) {
+            auto& entry = m_entries[i];
+            if (entry.block_index == block_index) {
+                entry.timestamp = now;
+                return entry;
+            }
+            if (!entry.is_dirty) {
+                if (!oldest_clean_entry)
+                    oldest_clean_entry = &entry;
+                else if (entry.timestamp < oldest_clean_entry->timestamp)
+                    oldest_clean_entry = &entry;
+            }
+        }
+
+        // FIXME: What if every single entry was dirty though :(
+        ASSERT(oldest_clean_entry);
+
+        // Replace the oldest clean entry.
+        auto& new_entry = *oldest_clean_entry;
+        new_entry.timestamp = now;
+        new_entry.block_index = block_index;
+        new_entry.has_data = false;
+        new_entry.is_dirty = false;
+        return new_entry;
+    }
+
+    template<typename Callback>
+    void for_each_entry(Callback callback)
+    {
+        for (size_t i = 0; i < m_entry_count; ++i)
+            callback(m_entries[i]);
+    }
+
+    size_t m_entry_count { 10000 };
+    KBuffer m_cached_block_data;
+    CacheEntry* m_entries { nullptr };
+    bool m_dirty { false };
+};
 
-Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>& block_cache()
-{
-    static Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>* s_cache;
-    if (!s_cache)
-        s_cache = new Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>;
-    return *s_cache;
-}
-
 DiskBackedFS::DiskBackedFS(NonnullRefPtr<DiskDevice>&& device)
     : m_device(move(device))
 {
@@ -61,18 +88,12 @@ bool DiskBackedFS::write_block(unsigned index, const ByteBuffer& data)
 #endif
 
     ASSERT(data.size() == block_size());
-    {
-        LOCKER(block_cache().lock());
-        if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
-            cached_block->m_buffer = data;
-    }
-
-    LOCKER(m_lock);
-    m_write_cache.set(index, data.isolated_copy());
-
-    if (m_write_cache.size() >= 32)
-        flush_writes();
-
+    auto& entry = cache().get(index);
+    memcpy(entry.data, data.data(), data.size());
+    entry.is_dirty = true;
+    entry.has_data = true;
+
+    cache().set_dirty(true);
     return true;
 }
@@ -92,31 +113,14 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
     kprintf("DiskBackedFileSystem::read_block %u\n", index);
 #endif
 
-    {
-        LOCKER(m_lock);
-        if (auto it = m_write_cache.find(index); it != m_write_cache.end())
-            return it->value;
-    }
-
-    {
-        LOCKER(block_cache().lock());
-        if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
-            return cached_block->m_buffer;
-    }
-
-    auto buffer = ByteBuffer::create_uninitialized(block_size());
-    //kprintf("created block buffer with size %u\n", block_size());
-    DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
-    auto* buffer_pointer = buffer.data();
-    bool success = device().read(base_offset, block_size(), buffer_pointer);
-    ASSERT(success);
-    ASSERT(buffer.size() == block_size());
-
-    {
-        LOCKER(block_cache().lock());
-        block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer));
-    }
-
-    return buffer;
+    auto& entry = cache().get(index);
+    if (!entry.has_data) {
+        DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
+        bool success = device().read(base_offset, block_size(), entry.data);
+        entry.has_data = true;
+        ASSERT(success);
+    }
+    return ByteBuffer::copy(entry.data, block_size());
 }
@@ -142,9 +146,24 @@ ByteBuffer DiskBackedFS::read_blocks(unsigned index, unsigned count) const
 void DiskBackedFS::flush_writes()
 {
     LOCKER(m_lock);
-    for (auto& it : m_write_cache) {
-        DiskOffset base_offset = static_cast<DiskOffset>(it.key) * static_cast<DiskOffset>(block_size());
-        device().write(base_offset, block_size(), it.value.data());
-    }
-    m_write_cache.clear();
+    if (!cache().is_dirty())
+        return;
+    u32 count = 0;
+    cache().for_each_entry([&](CacheEntry& entry) {
+        if (!entry.is_dirty)
+            return;
+        DiskOffset base_offset = static_cast<DiskOffset>(entry.block_index) * static_cast<DiskOffset>(block_size());
+        device().write(base_offset, block_size(), entry.data);
+        ++count;
+        entry.is_dirty = false;
+    });
+    cache().set_dirty(false);
+    dbg() << class_name() << ": " << "Flushed " << count << " blocks to disk";
+}
+
+DiskCache& DiskBackedFS::cache() const
+{
+    if (!m_cache)
+        m_cache = make<DiskCache>(block_size());
+    return *m_cache;
 }

Kernel/FileSystem/DiskBackedFileSystem.h

@@ -3,6 +3,8 @@
 #include "FileSystem.h"
 #include <AK/ByteBuffer.h>
 
+class DiskCache;
+
 class DiskBackedFS : public FS {
 public:
     virtual ~DiskBackedFS() override;
@@ -24,6 +26,8 @@ protected:
     bool write_blocks(unsigned index, unsigned count, const ByteBuffer&);
 
 private:
+    DiskCache& cache() const;
+
     NonnullRefPtr<DiskDevice> m_device;
-    HashMap<unsigned, ByteBuffer> m_write_cache;
+    mutable OwnPtr<DiskCache> m_cache;
 };