mirror of
https://github.com/RGBCube/serenity
synced 2025-05-31 11:18:11 +00:00
Ext2FS: Don't hog FS lock while reading/writing inodes
There are two locks in the Ext2FS implementation:

* The FS lock (Ext2FS::m_lock)
  This governs access to the superblock, block group descriptors, and the block & inode bitmap blocks. It's held while allocating or freeing blocks/inodes.

* The inode lock (Ext2FSInode::m_lock)
  This governs access to the inode metadata, including the block list, and to the content data as well. It's held while doing basically anything with the inode.

Once an on-disk block/inode is allocated, it logically belongs to the in-memory Inode object, so there's no need for the FS lock to be taken while manipulating them; the inode lock is all you need. This dramatically reduces the impact of disk I/O on path resolution and various other things that look at individual inodes.
This commit is contained in:
parent
c7c63727bf
commit
c09921b9be
2 changed files with 46 additions and 50 deletions
|
@@ -76,12 +76,12 @@ private:
|
|||
virtual KResult chmod(mode_t) override;
|
||||
virtual KResult chown(uid_t, gid_t) override;
|
||||
virtual KResult truncate(u64) override;
|
||||
|
||||
virtual KResultOr<int> get_block_address(int) override;
|
||||
|
||||
KResult write_directory(const Vector<Ext2FSDirectoryEntry>&);
|
||||
bool populate_lookup_cache() const;
|
||||
KResult resize(u64);
|
||||
KResult flush_block_list();
|
||||
|
||||
Ext2FS& fs();
|
||||
const Ext2FS& fs() const;
|
||||
|
@@ -147,7 +147,6 @@ private:
|
|||
|
||||
Vector<BlockIndex> block_list_for_inode_impl(const ext2_inode&, bool include_block_list_blocks = false) const;
|
||||
Vector<BlockIndex> block_list_for_inode(const ext2_inode&, bool include_block_list_blocks = false) const;
|
||||
KResult write_block_list_for_inode(InodeIndex, ext2_inode&, const Vector<BlockIndex>&);
|
||||
|
||||
KResultOr<bool> get_inode_allocation_state(InodeIndex) const;
|
||||
KResult set_inode_allocation_state(InodeIndex, bool);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue