
Ext2FS: Fail the mount if BGD table cache allocation fails

Instead of asserting when we can't allocate enough memory for the BGD
table cache, just fail the mount.
Andreas Kling 2020-12-18 13:18:47 +01:00
parent 8cde8ba511
commit 47da86d136
2 changed files with 9 additions and 5 deletions

Kernel/FileSystem/Ext2FileSystem.cpp

@@ -141,8 +141,12 @@ bool Ext2FS::initialize()
     unsigned blocks_to_read = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
     BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
-    m_cached_group_descriptor_table = KBuffer::create_with_size(block_size() * blocks_to_read, Region::Access::Read | Region::Access::Write, "Ext2FS: Block group descriptors");
-    auto buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_group_descriptor_table.value().data());
+    m_cached_group_descriptor_table = KBuffer::try_create_with_size(block_size() * blocks_to_read, Region::Access::Read | Region::Access::Write, "Ext2FS: Block group descriptors");
+    if (!m_cached_group_descriptor_table) {
+        dbgln("Ext2FS: Failed to allocate memory for group descriptor table");
+        return false;
+    }
+    auto buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_group_descriptor_table->data());
     read_blocks(first_block_of_bgdt, blocks_to_read, buffer);
 #ifdef EXT2_DEBUG

Kernel/FileSystem/Ext2FileSystem.h

@@ -120,8 +120,8 @@ private:
     const ext2_super_block& super_block() const { return m_super_block; }
     const ext2_group_desc& group_descriptor(GroupIndex) const;
-    ext2_group_desc* block_group_descriptors() { return (ext2_group_desc*)m_cached_group_descriptor_table.value().data(); }
-    const ext2_group_desc* block_group_descriptors() const { return (const ext2_group_desc*)m_cached_group_descriptor_table.value().data(); }
+    ext2_group_desc* block_group_descriptors() { return (ext2_group_desc*)m_cached_group_descriptor_table->data(); }
+    const ext2_group_desc* block_group_descriptors() const { return (const ext2_group_desc*)m_cached_group_descriptor_table->data(); }
     void flush_block_group_descriptor_table();
     unsigned inodes_per_block() const;
     unsigned inodes_per_group() const;
@@ -170,7 +170,7 @@ private:
     unsigned m_block_group_count { 0 };
     mutable ext2_super_block m_super_block;
-    mutable Optional<KBuffer> m_cached_group_descriptor_table;
+    mutable OwnPtr<KBuffer> m_cached_group_descriptor_table;
     mutable HashMap<InodeIndex, RefPtr<Ext2FSInode>> m_inode_cache;
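The header-side change, swapping Optional<KBuffer> for OwnPtr<KBuffer>, is what turns the accessors from .value().data() into ->data(): a null OwnPtr now doubles as the "not allocated" state. A rough standard-C++ analogy of the two member types (names illustrative; SerenityOS's Optional and OwnPtr are its own types, not std::):

#include <cstddef>
#include <memory>
#include <optional>
#include <vector>

using Bytes = std::vector<unsigned char>;

std::optional<Bytes> cached_as_optional; // before: value stored in-place
std::unique_ptr<Bytes> cached_as_ptr;    // after: value owned on the heap

std::size_t cache_size()
{
    // Both encode "present or absent" and test truthy when populated,
    // but the accessor syntax differs: .value(). versus ->
    if (cached_as_optional)
        return cached_as_optional.value().size();
    if (cached_as_ptr)
        return cached_as_ptr->size();
    return 0;
}

int main()
{
    cached_as_ptr = std::make_unique<Bytes>(16);
    return cache_size() == 16 ? 0 : 1;
}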