
Kernel: Handle OOM in DiskCache when mounting Ext2 filesystems

Create the disk cache up front, so we can verify it succeeds.
Make the KBuffer allocation fallible, so we can properly handle
failure when the user asks us to mount an Ext2 filesystem under
OOM conditions.
Author:    Brian Gianforcaro, 2021-08-01 04:33:06 -07:00
Committer: Andreas Kling
parent     187c086270
commit     a6db2f985a

3 changed files with 36 additions and 12 deletions
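
The heart of the change is replacing the infallible KBuffer::create_with_size() calls, which have no way to report failure, with the fallible try_create_with_size() variant, and doing the allocation up front where a failure can still be reported to the mounting process. A minimal sketch of the pattern (simplified; the exact code is in the diffs below):

    // Fallible allocation: try_create_with_size() yields a null OwnPtr on OOM
    // instead of asserting, so the caller decides how to handle the failure.
    auto cached_block_data = KBuffer::try_create_with_size(DiskCache::EntryCount * block_size());
    if (!cached_block_data)
        return false; // OOM: the mount fails cleanly instead of crashing the kernel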

Kernel/FileSystem/BlockBasedFileSystem.cpp

@@ -20,13 +20,14 @@ struct CacheEntry {
 class DiskCache {
 public:
-    explicit DiskCache(BlockBasedFileSystem& fs)
+    static constexpr size_t EntryCount = 10000;
+    explicit DiskCache(BlockBasedFileSystem& fs, NonnullOwnPtr<KBuffer> cached_block_data, NonnullOwnPtr<KBuffer> entries_buffer)
         : m_fs(fs)
-        , m_cached_block_data(KBuffer::create_with_size(m_entry_count * m_fs.block_size()))
-        , m_entries(KBuffer::create_with_size(m_entry_count * sizeof(CacheEntry)))
+        , m_cached_block_data(move(cached_block_data))
+        , m_entries(move(entries_buffer))
     {
-        for (size_t i = 0; i < m_entry_count; ++i) {
-            entries()[i].data = m_cached_block_data.data() + i * m_fs.block_size();
+        for (size_t i = 0; i < EntryCount; ++i) {
+            entries()[i].data = m_cached_block_data->data() + i * m_fs.block_size();
             m_clean_list.append(entries()[i]);
         }
     }
@@ -83,8 +84,8 @@ public:
         return new_entry;
     }
 
-    const CacheEntry* entries() const { return (const CacheEntry*)m_entries.data(); }
-    CacheEntry* entries() { return (CacheEntry*)m_entries.data(); }
+    const CacheEntry* entries() const { return (const CacheEntry*)m_entries->data(); }
+    CacheEntry* entries() { return (CacheEntry*)m_entries->data(); }
 
     template<typename Callback>
     void for_each_dirty_entry(Callback callback)
@@ -95,12 +96,11 @@ public:
 private:
     BlockBasedFileSystem& m_fs;
-    size_t m_entry_count { 10000 };
     mutable HashMap<BlockBasedFileSystem::BlockIndex, CacheEntry*> m_hash;
     mutable IntrusiveList<CacheEntry, RawPtr<CacheEntry>, &CacheEntry::list_node> m_clean_list;
     mutable IntrusiveList<CacheEntry, RawPtr<CacheEntry>, &CacheEntry::list_node> m_dirty_list;
-    KBuffer m_cached_block_data;
-    KBuffer m_entries;
+    NonnullOwnPtr<KBuffer> m_cached_block_data;
+    NonnullOwnPtr<KBuffer> m_entries;
     bool m_dirty { false };
 };
@@ -114,6 +114,25 @@ BlockBasedFileSystem::~BlockBasedFileSystem()
 {
 }
 
+bool BlockBasedFileSystem::initialize()
+{
+    VERIFY(block_size() != 0);
+    auto cached_block_data = KBuffer::try_create_with_size(DiskCache::EntryCount * block_size());
+    if (!cached_block_data)
+        return false;
+
+    auto entries_data = KBuffer::try_create_with_size(DiskCache::EntryCount * sizeof(CacheEntry));
+    if (!entries_data)
+        return false;
+
+    auto disk_cache = adopt_own_if_nonnull(new (nothrow) DiskCache(*this, cached_block_data.release_nonnull(), entries_data.release_nonnull()));
+    if (!disk_cache)
+        return false;
+
+    m_cache = move(disk_cache);
+    return true;
+}
+
 KResult BlockBasedFileSystem::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
 {
     VERIFY(m_logical_block_size);
@@ -293,8 +312,6 @@ void BlockBasedFileSystem::flush_writes()
 DiskCache& BlockBasedFileSystem::cache() const
 {
-    if (!m_cache)
-        m_cache = make<DiskCache>(const_cast<BlockBasedFileSystem&>(*this));
     return *m_cache;
 }
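
With the cache now created eagerly in initialize(), cache() no longer constructs it lazily and simply assumes m_cache is set. A hypothetical hardening, not part of this commit, would make that invariant explicit:

    DiskCache& BlockBasedFileSystem::cache() const
    {
        // Hypothetical: initialize() must have succeeded before any block I/O.
        VERIFY(m_cache);
        return *m_cache;
    }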

Kernel/FileSystem/BlockBasedFileSystem.h

@@ -15,6 +15,7 @@ public:
     TYPEDEF_DISTINCT_ORDERED_ID(u64, BlockIndex);
 
     virtual ~BlockBasedFileSystem() override;
+    virtual bool initialize() override;
 
     u64 logical_block_size() const { return m_logical_block_size; };

Kernel/FileSystem/Ext2FileSystem.cpp

@@ -89,6 +89,7 @@ const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
 bool Ext2FS::initialize()
 {
     MutexLocker locker(m_lock);
+
     VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
     auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
     bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
@@ -115,6 +116,11 @@ bool Ext2FS::initialize()
     set_block_size(EXT2_BLOCK_SIZE(&super_block));
     set_fragment_size(EXT2_FRAG_SIZE(&super_block));
 
+    // Note: This depends on the block size being available.
+    auto baseclass_result = BlockBasedFileSystem::initialize();
+    if (!baseclass_result)
+        return baseclass_result;
+
     VERIFY(block_size() <= (int)max_block_size);
 
     m_block_group_count = ceil_div(super_block.s_blocks_count, super_block.s_blocks_per_group);
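
The "Note" in the hunk above captures the one ordering constraint this design imposes: a subclass must establish its block size before chaining to BlockBasedFileSystem::initialize(), because the DiskCache buffers are sized from it. A sketch of the pattern for some other subclass (MyFS and the fixed 4 KiB block size are illustrative, not from this commit):

    class MyFS final : public BlockBasedFileSystem {
    public:
        virtual bool initialize() override
        {
            set_block_size(4096); // must be known before the base class sizes the cache
            // The base class allocates the DiskCache buffers and reports OOM as false.
            return BlockBasedFileSystem::initialize();
        }
    };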