Kernel: Use a doubly-linked list for the BlockBasedFS cache
This makes misses in the BlockBasedFS's LRU block cache faster by storing the cache entries in one of two doubly-linked lists. Dirty and clean cache entries are kept in two separate lists and move between them when their state changes. This can probably be improved further.
parent c33d71c5ff
commit 5f2f31861c

1 changed file with 55 additions and 46 deletions
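The scheme this commit introduces keeps every cache entry on exactly one of two lists, so a state change is an O(1) pointer relink and a cache miss no longer scans the whole entry array for an eviction victim. Below is a minimal standalone sketch of that bookkeeping, with std::list standing in for AK::IntrusiveList; all names here are illustrative, not the kernel's:

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <unordered_map>

// Hypothetical stand-in for the kernel's CacheEntry (which also carries
// the block data and an intrusive list node).
struct Block {
    uint32_t index { 0 };
    bool has_data { false };
    bool dirty { false }; // tracked so we know which list the node is on
};

class TwoListCache {
public:
    explicit TwoListCache(std::size_t entry_count)
    {
        // Every entry starts out clean and unassigned.
        for (std::size_t i = 0; i < entry_count; ++i)
            m_clean.push_back(Block {});
    }

    std::list<Block>::iterator get(uint32_t index)
    {
        // Hit: the map points straight at the list node, no scan needed.
        if (auto it = m_map.find(index); it != m_map.end())
            return it->second;

        // Miss: recycle the tail of the clean list in O(1). (The kernel
        // flushes all writes and retries if no clean entry exists; that
        // case is omitted here.)
        auto victim = std::prev(m_clean.end());
        m_map.erase(victim->index);
        m_clean.splice(m_clean.begin(), m_clean, victim); // relink to front
        victim->index = index;
        victim->has_data = false;
        m_map[index] = victim;
        return victim;
    }

    void mark_dirty(std::list<Block>::iterator entry)
    {
        if (entry->dirty)
            return; // already on the dirty list
        entry->dirty = true;
        m_dirty.splice(m_dirty.begin(), m_clean, entry); // O(1) relink
    }

    void mark_all_clean()
    {
        for (auto& block : m_dirty)
            block.dirty = false;
        m_clean.splice(m_clean.begin(), m_dirty); // moves every node at once
    }

private:
    std::list<Block> m_clean;
    std::list<Block> m_dirty;
    std::unordered_map<uint32_t, std::list<Block>::iterator> m_map;
};

int main()
{
    TwoListCache cache(4);
    auto entry = cache.get(42); // miss: recycles a clean entry
    cache.mark_dirty(entry);    // moves it onto the dirty list
    cache.mark_all_clean();     // bulk move back, as flush_writes_impl() does
}

The essential property is that every state change is a constant-time relink: std::list::splice() moves a node between lists without allocating, much like relinking an intrusive node does in the kernel.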
Kernel/FileSystem/BlockBasedFileSystem.cpp

@@ -24,6 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <AK/IntrusiveList.h>
 #include <Kernel/FileSystem/BlockBasedFileSystem.h>
 #include <Kernel/Process.h>
 
@@ -32,12 +33,10 @@
 namespace Kernel {
 
 struct CacheEntry {
-    time_t timestamp { 0 };
+    IntrusiveListNode list_node;
     u32 block_index { 0 };
     u8* data { nullptr };
     bool has_data { false };
-    bool is_dirty { false };
-    bool is_used { false };
 };
 
 class DiskCache {
@@ -49,6 +48,7 @@ public:
     {
         for (size_t i = 0; i < m_entry_count; ++i) {
             entries()[i].data = m_cached_block_data.data() + i * m_fs.block_size();
+            m_clean_list.append(entries()[i]);
         }
     }
 
@@ -57,29 +57,33 @@ public:
     bool is_dirty() const { return m_dirty; }
     void set_dirty(bool b) { m_dirty = b; }
 
+    void mark_all_clean()
+    {
+        while (auto* entry = m_dirty_list.first())
+            m_clean_list.prepend(*entry);
+        m_dirty = false;
+    }
+
+    void mark_dirty(CacheEntry& entry)
+    {
+        m_dirty_list.prepend(entry);
+        m_dirty = true;
+    }
+
+    void mark_clean(CacheEntry& entry)
+    {
+        m_clean_list.prepend(entry);
+    }
+
     CacheEntry& get(u32 block_index) const
     {
-        auto now = kgettimeofday().tv_sec;
-
-        if (auto it = m_map.find(block_index); it != m_map.end()) {
-            auto& entry = const_cast<CacheEntry&>(entries()[it->value]);
+        if (auto it = m_hash.find(block_index); it != m_hash.end()) {
+            auto& entry = const_cast<CacheEntry&>(*it->value);
             ASSERT(entry.block_index == block_index);
-            entry.timestamp = now;
             return entry;
         }
 
-        Optional<size_t> oldest_clean_index;
-        for (size_t i = 0; i < m_entry_count; ++i) {
-            auto& entry = const_cast<CacheEntry&>(entries()[i]);
-            ASSERT(!entry.is_used || entry.block_index != block_index);
-            if (!entry.is_dirty) {
-                if (!oldest_clean_index.has_value())
-                    oldest_clean_index = i;
-                else if (entry.timestamp < entries()[oldest_clean_index.value()].timestamp)
-                    oldest_clean_index = i;
-            }
-        }
-        if (!oldest_clean_index.has_value()) {
+        if (m_clean_list.is_empty()) {
             // Not a single clean entry! Flush writes and try again.
             // NOTE: We want to make sure we only call FileBackedFS flush here,
             //       not some FileBackedFS subclass flush!
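An observation on the hunk above: mark_all_clean() repeatedly takes m_dirty_list.first() and only ever prepends it to the clean list, so the loop terminates only if prepending also unlinks the entry from the dirty list. That is the defining property of an intrusive list: the prev/next pointers live inside the entry itself, so a node can be on at most one list at a time. A self-contained sketch of a node with that move-on-insert behavior (not AK's implementation; a circular list with a sentinel node is used so unlinking never needs to know which list owns the node):

#include <cassert>

struct Node {
    Node* prev { this };
    Node* next { this };

    bool is_linked() const { return next != this; }

    void unlink()
    {
        // Safe even when the node is not on any list (it points at itself).
        prev->next = next;
        next->prev = prev;
        prev = next = this;
    }
};

struct List {
    Node sentinel;

    bool is_empty() const { return !sentinel.is_linked(); }
    Node* first() { return is_empty() ? nullptr : sentinel.next; }

    void prepend(Node& node)
    {
        node.unlink(); // move-on-insert: leaves any list it is currently on
        node.next = sentinel.next;
        node.prev = &sentinel;
        sentinel.next->prev = &node;
        sentinel.next = &node;
    }
};

int main()
{
    // mark_all_clean()'s loop terminates precisely because prepend()
    // unlinks: every iteration shrinks the dirty list by one node.
    List dirty;
    List clean;
    Node a;
    Node b;
    dirty.prepend(a);
    dirty.prepend(b);
    while (auto* entry = dirty.first())
        clean.prepend(*entry);
    assert(dirty.is_empty());
    assert(clean.first() == &a);
}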
@@ -87,21 +91,16 @@ public:
             return get(block_index);
         }
 
-        // Replace the oldest clean entry.
-        auto& new_entry = const_cast<CacheEntry&>(entries()[oldest_clean_index.value()]);
+        ASSERT(m_clean_list.last());
+        auto& new_entry = *m_clean_list.last();
+        m_clean_list.prepend(new_entry);
 
-        // If this entry was occupied, remove the previous mapping from the fast lookup table.
-        if (new_entry.block_index)
-            m_map.remove(new_entry.block_index);
+        m_hash.remove(new_entry.block_index);
+        m_hash.set(block_index, &new_entry);
 
-        // Create a fast lookup mapping from the block index to this entry.
-        m_map.set(block_index, oldest_clean_index.value());
 
-        new_entry.timestamp = now;
         new_entry.block_index = block_index;
         new_entry.has_data = false;
-        new_entry.is_dirty = false;
-        new_entry.is_used = true;
         return new_entry;
     }
 
@@ -109,16 +108,25 @@ public:
     CacheEntry* entries() { return (CacheEntry*)m_entries.data(); }
 
     template<typename Callback>
-    void for_each_entry(Callback callback)
+    void for_each_clean_entry(Callback callback)
     {
-        for (size_t i = 0; i < m_entry_count; ++i)
-            callback(entries()[i]);
+        for (auto& entry : m_clean_list)
+            callback(entry);
+    }
+
+    template<typename Callback>
+    void for_each_dirty_entry(Callback callback)
+    {
+        for (auto& entry : m_dirty_list)
+            callback(entry);
     }
 
 private:
     BlockBasedFS& m_fs;
-    mutable HashMap<u32, u32> m_map;
     size_t m_entry_count { 10000 };
+    mutable HashMap<u32, CacheEntry*> m_hash;
+    mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_clean_list;
+    mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_dirty_list;
     KBuffer m_cached_block_data;
     KBuffer m_entries;
     bool m_dirty { false };
@@ -160,10 +168,9 @@ int BlockBasedFS::write_block(unsigned index, const UserOrKernelBuffer& data, si
     }
     if (!data.read(entry.data + offset, count))
         return -EFAULT;
-    entry.is_dirty = true;
-    entry.has_data = true;
 
-    cache().set_dirty(true);
+    cache().mark_dirty(entry);
+    entry.has_data = true;
     return 0;
 }
 
@@ -276,16 +283,21 @@ void BlockBasedFS::flush_specific_block_if_needed(unsigned index)
     LOCKER(m_lock);
     if (!cache().is_dirty())
         return;
-    cache().for_each_entry([&](CacheEntry& entry) {
-        if (entry.is_dirty && entry.block_index == index) {
+    Vector<CacheEntry*, 32> cleaned_entries;
+    cache().for_each_dirty_entry([&](CacheEntry& entry) {
+        if (entry.block_index != index) {
             u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
             file_description().seek(base_offset, SEEK_SET);
             // FIXME: Should this error path be surfaced somehow?
             auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
             (void)file_description().write(entry_data_buffer, block_size());
-            entry.is_dirty = false;
+            cleaned_entries.append(&entry);
         }
     });
+    // NOTE: We make a separate pass to mark entries clean since marking them clean
+    //       moves them out of the dirty list which would disturb the iteration above.
+    for (auto* entry : cleaned_entries)
+        cache().mark_clean(*entry);
 }
 
 void BlockBasedFS::flush_writes_impl()
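The cleaned_entries vector above is a two-pass pattern: mark_clean() would relink an entry out of the very list for_each_dirty_entry() is walking, so entries are collected first and moved only after the traversal finishes, as the NOTE in the diff explains. A small illustration of the same pattern, using std::list as a stand-in for the kernel's types:

#include <list>
#include <vector>

int main()
{
    std::list<int> dirty_list { 1, 2, 3 };
    std::list<int> clean_list;

    // First pass: do the work, but only remember which nodes are done.
    // Relinking the node the loop is visiting would break the traversal.
    std::vector<std::list<int>::iterator> flushed;
    for (auto it = dirty_list.begin(); it != dirty_list.end(); ++it) {
        // ... write the block out to disk here ...
        flushed.push_back(it);
    }

    // Second pass: now it is safe to move nodes between lists.
    for (auto it : flushed)
        clean_list.splice(clean_list.begin(), dirty_list, it);
}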
|
@ -294,18 +306,15 @@ void BlockBasedFS::flush_writes_impl()
|
||||||
if (!cache().is_dirty())
|
if (!cache().is_dirty())
|
||||||
return;
|
return;
|
||||||
u32 count = 0;
|
u32 count = 0;
|
||||||
cache().for_each_entry([&](CacheEntry& entry) {
|
cache().for_each_dirty_entry([&](CacheEntry& entry) {
|
||||||
if (!entry.is_dirty)
|
|
||||||
return;
|
|
||||||
u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
|
u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
|
||||||
file_description().seek(base_offset, SEEK_SET);
|
file_description().seek(base_offset, SEEK_SET);
|
||||||
// FIXME: Should this error path be surfaced somehow?
|
// FIXME: Should this error path be surfaced somehow?
|
||||||
auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
|
auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
|
||||||
(void)file_description().write(entry_data_buffer, block_size());
|
(void)file_description().write(entry_data_buffer, block_size());
|
||||||
++count;
|
++count;
|
||||||
entry.is_dirty = false;
|
|
||||||
});
|
});
|
||||||
cache().set_dirty(false);
|
cache().mark_all_clean();
|
||||||
dbg() << class_name() << ": Flushed " << count << " blocks to disk";
|
dbg() << class_name() << ": Flushed " << count << " blocks to disk";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|