1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 02:37:42 +00:00

Kernel: Add a write cache to DiskBackedFS.

This way you can spam small write()s on a file without the kernel writing
to disk every single time. Flushes are included in the FS::sync() operation
and will get triggered regularly by syncd. :^)
This commit is contained in:
Andreas Kling 2019-04-25 22:05:53 +02:00
parent e0cdc5db0d
commit 44673c4f3b
5 changed files with 49 additions and 13 deletions

View file

@ -118,6 +118,13 @@ public:
void* end_pointer() { return m_impl ? m_impl->end_pointer() : nullptr; } void* end_pointer() { return m_impl ? m_impl->end_pointer() : nullptr; }
const void* end_pointer() const { return m_impl ? m_impl->end_pointer() : nullptr; } const void* end_pointer() const { return m_impl ? m_impl->end_pointer() : nullptr; }
// Returns a deep copy of this buffer's contents that shares no storage
// with the original: an empty buffer when there is no backing impl,
// otherwise a fresh copy of the impl's bytes. Useful when the caller
// must keep the data alive independently of this ByteBuffer (e.g. for
// caching), since a plain copy presumably shares the underlying impl —
// TODO confirm against ByteBuffer's copy semantics.
ByteBuffer isolated_copy() const
{
if (!m_impl)
return { };
return copy(m_impl->pointer(), m_impl->size());
}
// NOTE: trim() does not reallocate. // NOTE: trim() does not reallocate.
void trim(ssize_t size) void trim(ssize_t size)
{ {

View file

@ -66,8 +66,10 @@ bool DiskBackedFS::write_block(unsigned index, const ByteBuffer& data)
if (auto* cached_block = block_cache().resource().get({ fsid(), index })) if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
cached_block->m_buffer = data; cached_block->m_buffer = data;
} }
DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
return device().write(base_offset, block_size(), data.pointer()); LOCKER(m_lock);
m_write_cache.set(index, data.isolated_copy());
return true;
} }
bool DiskBackedFS::write_blocks(unsigned index, unsigned count, const ByteBuffer& data) bool DiskBackedFS::write_blocks(unsigned index, unsigned count, const ByteBuffer& data)
@ -75,16 +77,9 @@ bool DiskBackedFS::write_blocks(unsigned index, unsigned count, const ByteBuffer
#ifdef DBFS_DEBUG #ifdef DBFS_DEBUG
kprintf("DiskBackedFileSystem::write_blocks %u x%u\n", index, count); kprintf("DiskBackedFileSystem::write_blocks %u x%u\n", index, count);
#endif #endif
// FIXME: Maybe reorder this so we send out the write commands before updating cache? for (unsigned i = 0; i < count; ++i)
{ write_block(index + i, data.slice(i * block_size(), block_size()));
LOCKER(block_cache().lock()); return true;
for (unsigned i = 0; i < count; ++i) {
if (auto* cached_block = block_cache().resource().get({ fsid(), index + i }))
cached_block->m_buffer = data.slice(i * block_size(), block_size());
}
}
DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
return device().write(base_offset, count * block_size(), data.pointer());
} }
ByteBuffer DiskBackedFS::read_block(unsigned index) const ByteBuffer DiskBackedFS::read_block(unsigned index) const
@ -92,6 +87,13 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
#ifdef DBFS_DEBUG #ifdef DBFS_DEBUG
kprintf("DiskBackedFileSystem::read_block %u\n", index); kprintf("DiskBackedFileSystem::read_block %u\n", index);
#endif #endif
{
LOCKER(m_lock);
if (auto it = m_write_cache.find(index); it != m_write_cache.end())
return it->value;
}
{ {
LOCKER(block_cache().lock()); LOCKER(block_cache().lock());
if (auto* cached_block = block_cache().resource().get({ fsid(), index })) if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
@ -105,6 +107,7 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
bool success = device().read(base_offset, block_size(), buffer_pointer); bool success = device().read(base_offset, block_size(), buffer_pointer);
ASSERT(success); ASSERT(success);
ASSERT(buffer.size() == block_size()); ASSERT(buffer.size() == block_size());
{ {
LOCKER(block_cache().lock()); LOCKER(block_cache().lock());
block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer)); block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer));
@ -138,3 +141,13 @@ void DiskBackedFS::set_block_size(unsigned block_size)
return; return;
m_block_size = block_size; m_block_size = block_size;
} }
// Writes every cached dirty block out to the underlying disk device and
// then empties the write cache. Invoked through FS::sync() (see the
// commit message: flushes are triggered regularly by syncd).
void DiskBackedFS::flush_writes()
{
// Hold m_lock for the whole flush so writers can't mutate m_write_cache
// while we iterate it.
LOCKER(m_lock);
for (auto& it : m_write_cache) {
// it.key is the block index; convert to a byte offset on the device.
DiskOffset base_offset = static_cast<DiskOffset>(it.key) * static_cast<DiskOffset>(block_size());
// NOTE(review): the return value of device().write() is discarded, so a
// failed write is silently dropped and the block is still cleared from
// the cache below — confirm this best-effort behavior is intended.
device().write(base_offset, block_size(), it.value.data());
}
m_write_cache.clear();
}

View file

@ -12,6 +12,8 @@ public:
int block_size() const { return m_block_size; } int block_size() const { return m_block_size; }
virtual void flush_writes() override;
protected: protected:
explicit DiskBackedFS(Retained<DiskDevice>&&); explicit DiskBackedFS(Retained<DiskDevice>&&);
@ -26,4 +28,6 @@ protected:
private: private:
int m_block_size { 0 }; int m_block_size { 0 };
Retained<DiskDevice> m_device; Retained<DiskDevice> m_device;
HashMap<unsigned, ByteBuffer> m_write_cache;
}; };

View file

@ -144,7 +144,7 @@ int Inode::decrement_link_count()
void FS::sync() void FS::sync()
{ {
Vector<Retained<Inode>> inodes; Vector<Retained<Inode>, 32> inodes;
{ {
InterruptDisabler disabler; InterruptDisabler disabler;
for (auto* inode : all_inodes()) { for (auto* inode : all_inodes()) {
@ -157,6 +157,16 @@ void FS::sync()
ASSERT(inode->is_metadata_dirty()); ASSERT(inode->is_metadata_dirty());
inode->flush_metadata(); inode->flush_metadata();
} }
Vector<Retained<FS>, 32> fses;
{
InterruptDisabler disabler;
for (auto& it : all_fses())
fses.append(*it.value);
}
for (auto fs : fses)
fs->flush_writes();
} }
void Inode::set_vmo(VMObject& vmo) void Inode::set_vmo(VMObject& vmo)

View file

@ -57,6 +57,8 @@ public:
virtual RetainPtr<Inode> get_inode(InodeIdentifier) const = 0; virtual RetainPtr<Inode> get_inode(InodeIdentifier) const = 0;
virtual void flush_writes() { }
protected: protected:
FS(); FS();