Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 01:17:35 +00:00
Kernel: Remove the now defunct LOCKER(..) macro.

commit 8d6e9fad40
parent 0d5827f865
31 changed files with 196 additions and 198 deletions
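For context, the change is purely mechanical: every call site that used the LOCKER(..) convenience macro now declares the scoped Locker guard directly. The sketch below is a minimal, self-contained illustration of that pattern, not the kernel's real locking code; Lock is stood in by std::mutex and the macro body is an assumption about its shape.

// Simplified stand-ins for illustration only; the kernel's actual Lock/Locker
// support shared mode, blocking, and more.
#include <mutex>

using Lock = std::mutex;

class Locker {
public:
    explicit Locker(Lock& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquired on construction
    }
    ~Locker() { m_lock.unlock(); } // released when the guard goes out of scope

private:
    Lock& m_lock;
};

// Assumed shape of the retired macro: it simply declared a Locker named `locker`.
#define LOCKER(...) Locker locker(__VA_ARGS__)

struct Example {
    Lock m_lock;

    void old_style() // before this commit: the macro hides the guard declaration
    {
        LOCKER(m_lock);
        // ... critical section ...
    }

    void new_style() // after this commit: the RAII guard is spelled out
    {
        Locker locker(m_lock);
        // ... critical section ...
    }
};

The diff that follows applies exactly this one-line substitution in every affected function, including the variants that take Lock::Mode::Shared or lock a different object (for example m_parent_fs.m_lock or all_fifos().lock()).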
@@ -116,7 +116,7 @@ BlockBasedFS::~BlockBasedFS()
 
 KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     VERIFY(offset + count <= block_size());
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);
@@ -151,7 +151,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
 
 bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     u32 base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
@@ -163,7 +163,7 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 
 bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     size_t base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
@@ -175,7 +175,7 @@ bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 
 bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto current = buffer;
     for (unsigned block = index.value(); block < (index.value() + count); block++) {
         if (!raw_read(BlockIndex { block }, current))
@@ -187,7 +187,7 @@ bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelB
 
 bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto current = buffer;
     for (unsigned block = index.value(); block < (index.value() + count); block++) {
         if (!raw_write(block, current))
@@ -199,7 +199,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr
 
 KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
     for (unsigned i = 0; i < count; ++i) {
@@ -212,7 +212,7 @@ KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserO
 
 KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     VERIFY(offset + count <= block_size());
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);
@@ -250,7 +250,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
 
 KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     if (!count)
         return EINVAL;
@@ -269,7 +269,7 @@ KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernel
 
 void BlockBasedFS::flush_specific_block_if_needed(BlockIndex index)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!cache().is_dirty())
         return;
     Vector<CacheEntry*, 32> cleaned_entries;
@@ -292,7 +292,7 @@ void BlockBasedFS::flush_specific_block_if_needed(BlockIndex index)
 
 void BlockBasedFS::flush_writes_impl()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!cache().is_dirty())
         return;
     u32 count = 0;

@@ -20,7 +20,7 @@ NonnullRefPtr<DevFS> DevFS::create()
 DevFS::DevFS()
     : m_root_inode(adopt_ref(*new DevFSRootDirectoryInode(*this)))
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     Device::for_each([&](Device& device) {
         // FIXME: Find a better way to not add MasterPTYs or SlavePTYs!
         if (device.is_master_pty() || (device.is_character_device() && device.major() == 201))
@@ -31,7 +31,7 @@ DevFS::DevFS()
 
 void DevFS::notify_new_device(Device& device)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto new_device_inode = adopt_ref(*new DevFSDeviceInode(*this, device));
     m_nodes.append(new_device_inode);
     m_root_inode->m_devices.append(new_device_inode);
@@ -39,7 +39,7 @@ void DevFS::notify_new_device(Device& device)
 
 size_t DevFS::allocate_inode_index()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_next_inode_index = m_next_inode_index.value() + 1;
     VERIFY(m_next_inode_index > 0);
     return 1 + m_next_inode_index.value();
@@ -66,7 +66,7 @@ NonnullRefPtr<Inode> DevFS::root_inode() const
 
 RefPtr<Inode> DevFS::get_inode(InodeIdentifier inode_id) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (inode_id.index() == 1)
         return m_root_inode;
     for (auto& node : m_nodes) {
@@ -153,7 +153,7 @@ DevFSLinkInode::DevFSLinkInode(DevFS& fs, String name)
 }
 ssize_t DevFSLinkInode::read_bytes(off_t offset, ssize_t, UserOrKernelBuffer& buffer, FileDescription*) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(offset == 0);
     VERIFY(!m_link.is_null());
     if (!buffer.write(((const u8*)m_link.substring_view(0).characters_without_null_termination()) + offset, m_link.length()))
@@ -162,7 +162,7 @@ ssize_t DevFSLinkInode::read_bytes(off_t offset, ssize_t, UserOrKernelBuffer& bu
 }
 InodeMetadata DevFSLinkInode::metadata() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     InodeMetadata metadata;
     metadata.inode = { fsid(), index() };
     metadata.mode = S_IFLNK | 0555;
@@ -174,7 +174,7 @@ InodeMetadata DevFSLinkInode::metadata() const
 }
 ssize_t DevFSLinkInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription*)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(offset == 0);
     VERIFY(buffer.is_kernel_buffer());
     m_link = buffer.copy_into_string(count);
@@ -190,7 +190,7 @@ DevFSDirectoryInode::~DevFSDirectoryInode()
 }
 InodeMetadata DevFSDirectoryInode::metadata() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     InodeMetadata metadata;
     metadata.inode = { fsid(), 1 };
     metadata.mode = 0040555;
@@ -202,17 +202,17 @@ InodeMetadata DevFSDirectoryInode::metadata() const
 }
 KResult DevFSDirectoryInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return EINVAL;
 }
 RefPtr<Inode> DevFSDirectoryInode::lookup(StringView)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return nullptr;
 }
 KResultOr<size_t> DevFSDirectoryInode::directory_entry_count() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_devices.size();
 }
 
@@ -223,7 +223,7 @@ DevFSRootDirectoryInode::DevFSRootDirectoryInode(DevFS& fs)
 }
 KResult DevFSRootDirectoryInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
 {
-    LOCKER(m_parent_fs.m_lock);
+    Locker locker(m_parent_fs.m_lock);
     callback({ ".", identifier(), 0 });
     callback({ "..", identifier(), 0 });
 
@@ -244,7 +244,7 @@ KResult DevFSRootDirectoryInode::traverse_as_directory(Function<bool(const FS::D
 }
 RefPtr<Inode> DevFSRootDirectoryInode::lookup(StringView name)
 {
-    LOCKER(m_parent_fs.m_lock);
+    Locker locker(m_parent_fs.m_lock);
     for (auto& subfolder : m_subfolders) {
         if (subfolder.name() == name)
             return subfolder;
@@ -263,7 +263,7 @@ RefPtr<Inode> DevFSRootDirectoryInode::lookup(StringView name)
 }
 KResultOr<NonnullRefPtr<Inode>> DevFSRootDirectoryInode::create_child(const String& name, mode_t mode, dev_t, uid_t, gid_t)
 {
-    LOCKER(m_parent_fs.m_lock);
+    Locker locker(m_parent_fs.m_lock);
 
     InodeMetadata metadata;
     metadata.mode = mode;
@@ -297,7 +297,7 @@ DevFSRootDirectoryInode::~DevFSRootDirectoryInode()
 }
 InodeMetadata DevFSRootDirectoryInode::metadata() const
 {
-    LOCKER(m_parent_fs.m_lock);
+    Locker locker(m_parent_fs.m_lock);
     InodeMetadata metadata;
     metadata.inode = { fsid(), 1 };
     metadata.mode = 0040555;
@@ -309,7 +309,7 @@ InodeMetadata DevFSRootDirectoryInode::metadata() const
 }
 KResultOr<size_t> DevFSRootDirectoryInode::directory_entry_count() const
 {
-    LOCKER(m_parent_fs.m_lock);
+    Locker locker(m_parent_fs.m_lock);
     return m_devices.size() + DevFSDirectoryInode::directory_entry_count().value();
 }
 
@@ -323,7 +323,7 @@ DevFSDeviceInode::~DevFSDeviceInode()
 }
 KResult DevFSDeviceInode::chown(uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_uid = uid;
     m_gid = gid;
     return KSuccess;
@@ -331,7 +331,7 @@ KResult DevFSDeviceInode::chown(uid_t uid, gid_t gid)
 
 String DevFSDeviceInode::name() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_cached_name.is_null() || m_cached_name.is_empty())
         const_cast<DevFSDeviceInode&>(*this).m_cached_name = m_attached_device->device_name();
     return m_cached_name;
@@ -339,7 +339,7 @@ String DevFSDeviceInode::name() const
 
 ssize_t DevFSDeviceInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!!description);
     if (!m_attached_device->can_read(*description, offset))
         return 0;
@@ -351,7 +351,7 @@ ssize_t DevFSDeviceInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBu
 
 InodeMetadata DevFSDeviceInode::metadata() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     InodeMetadata metadata;
     metadata.inode = { fsid(), index() };
     metadata.mode = (m_attached_device->is_block_device() ? S_IFBLK : S_IFCHR) | m_attached_device->required_mode();
@@ -365,7 +365,7 @@ InodeMetadata DevFSDeviceInode::metadata() const
 }
 ssize_t DevFSDeviceInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription* description)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!!description);
     if (!m_attached_device->can_write(*description, offset))
         return 0;
@@ -381,7 +381,7 @@ DevFSPtsDirectoryInode::DevFSPtsDirectoryInode(DevFS& fs)
 }
 KResult DevFSPtsDirectoryInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     callback({ ".", identifier(), 0 });
     callback({ "..", identifier(), 0 });
     return KSuccess;
@@ -395,7 +395,7 @@ DevFSPtsDirectoryInode::~DevFSPtsDirectoryInode()
 }
 InodeMetadata DevFSPtsDirectoryInode::metadata() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     InodeMetadata metadata;
     metadata.inode = { fsid(), index() };
     metadata.mode = 0040555;

@@ -69,7 +69,7 @@ Ext2FS::~Ext2FS()
 
 bool Ext2FS::flush_super_block()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
     auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
     bool success = raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
@@ -87,7 +87,7 @@ const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
 
 bool Ext2FS::initialize()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
     auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
     bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
@@ -387,7 +387,7 @@ KResult Ext2FSInode::shrink_triply_indirect_block(BlockBasedFS::BlockIndex block
 
 KResult Ext2FSInode::flush_block_list()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     if (m_block_list.is_empty()) {
         m_raw_inode.i_blocks = 0;
@@ -628,7 +628,7 @@ Vector<Ext2FS::BlockIndex> Ext2FSInode::compute_block_list_impl_internal(const e
 
 void Ext2FS::free_inode(Ext2FSInode& inode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(inode.m_raw_inode.i_links_count == 0);
     dbgln_if(EXT2_DEBUG, "Ext2FS[{}]::free_inode(): Inode {} has no more links, time to delete!", fsid(), inode.index());
 
@@ -662,7 +662,7 @@ void Ext2FS::free_inode(Ext2FSInode& inode)
 
 void Ext2FS::flush_block_group_descriptor_table()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     unsigned blocks_to_write = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
     unsigned first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
     auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)block_group_descriptors());
@@ -672,7 +672,7 @@ void Ext2FS::flush_block_group_descriptor_table()
 
 void Ext2FS::flush_writes()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_super_block_dirty) {
         flush_super_block();
         m_super_block_dirty = false;
@@ -732,7 +732,7 @@ u64 Ext2FSInode::size() const
 
 InodeMetadata Ext2FSInode::metadata() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     InodeMetadata metadata;
     metadata.inode = identifier();
     metadata.size = size();
@@ -759,7 +759,7 @@ InodeMetadata Ext2FSInode::metadata() const
 
 void Ext2FSInode::flush_metadata()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::flush_metadata(): Flushing inode", identifier());
     fs().write_ext2_inode(index(), m_raw_inode);
     if (is_directory()) {
@@ -774,7 +774,7 @@ void Ext2FSInode::flush_metadata()
 
 RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(inode.fsid() == fsid());
 
     {
@@ -1049,7 +1049,7 @@ Ext2FS::FeaturesReadOnly Ext2FS::get_features_readonly() const
 
 KResult Ext2FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(is_directory());
 
     auto buffer_or = read_entire();
@@ -1073,7 +1073,7 @@ KResult Ext2FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntr
 
 KResult Ext2FSInode::write_directory(const Vector<Ext2FSDirectoryEntry>& entries)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     int directory_size = 0;
     for (auto& entry : entries)
@@ -1130,7 +1130,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(const String& name, mo
 
 KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(is_directory());
 
     if (name.length() > EXT2_NAME_LEN)
@@ -1173,7 +1173,7 @@ KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode
 
 KResult Ext2FSInode::remove_child(const StringView& name)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::remove_child(): Removing '{}'", identifier(), name);
     VERIFY(is_directory());
 
@@ -1239,7 +1239,7 @@ bool Ext2FS::write_ext2_inode(InodeIndex inode, const ext2_inode& e2inode)
 
 auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) -> KResultOr<Vector<BlockIndex>>
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_blocks(preferred group: {}, count {})", preferred_group_index, count);
     if (count == 0)
         return Vector<BlockIndex> {};
@@ -1304,7 +1304,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
 KResultOr<InodeIndex> Ext2FS::allocate_inode(GroupIndex preferred_group)
 {
     dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_inode(preferred_group: {})", preferred_group);
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     // FIXME: We shouldn't refuse to allocate an inode if there is no group that can house the whole thing.
     // In those cases we should just spread it across multiple groups.
@@ -1380,7 +1380,7 @@ auto Ext2FS::group_index_from_inode(InodeIndex inode) const -> GroupIndex
 
 KResultOr<bool> Ext2FS::get_inode_allocation_state(InodeIndex index) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (index == 0)
         return EINVAL;
     auto group_index = group_index_from_inode(index);
@@ -1423,7 +1423,7 @@ KResult Ext2FS::update_bitmap_block(BlockIndex bitmap_block, size_t bit_index, b
 
 KResult Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto group_index = group_index_from_inode(inode_index);
     unsigned index_in_group = inode_index.value() - ((group_index.value() - 1) * inodes_per_group());
     unsigned bit_index = (index_in_group - 1) % inodes_per_group();
@@ -1458,7 +1458,7 @@ KResultOr<Ext2FS::CachedBitmap*> Ext2FS::get_bitmap_block(BlockIndex bitmap_bloc
 KResult Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
 {
     VERIFY(block_index != 0);
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     auto group_index = group_index_from_block_index(block_index);
     unsigned index_in_group = (block_index.value() - first_block_index().value()) - ((group_index.value() - 1) * blocks_per_group());
@@ -1471,7 +1471,7 @@ KResult Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_stat
 
 KResult Ext2FS::create_directory(Ext2FSInode& parent_inode, const String& name, mode_t mode, uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(is_directory(mode));
 
     auto inode_or_error = create_inode(parent_inode, name, mode, 0, uid, gid);
@@ -1546,7 +1546,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
 
 bool Ext2FSInode::populate_lookup_cache() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!m_lookup_cache.is_empty())
         return true;
     HashMap<String, InodeIndex> children;
@@ -1571,7 +1571,7 @@ RefPtr<Inode> Ext2FSInode::lookup(StringView name)
     dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]:lookup(): Looking up '{}'", identifier(), name);
     if (!populate_lookup_cache())
         return {};
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto it = m_lookup_cache.find(name.hash(), [&](auto& entry) { return entry.key == name; });
     if (it != m_lookup_cache.end())
         return fs().get_inode({ fsid(), (*it).value });
@@ -1586,7 +1586,7 @@ void Ext2FSInode::one_ref_left()
 
 int Ext2FSInode::set_atime(time_t t)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return -EROFS;
     m_raw_inode.i_atime = t;
@@ -1596,7 +1596,7 @@ int Ext2FSInode::set_atime(time_t t)
 
 int Ext2FSInode::set_ctime(time_t t)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return -EROFS;
     m_raw_inode.i_ctime = t;
@@ -1606,7 +1606,7 @@ int Ext2FSInode::set_ctime(time_t t)
 
 int Ext2FSInode::set_mtime(time_t t)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return -EROFS;
     m_raw_inode.i_mtime = t;
@@ -1616,7 +1616,7 @@ int Ext2FSInode::set_mtime(time_t t)
 
 KResult Ext2FSInode::increment_link_count()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return EROFS;
     if (m_raw_inode.i_links_count == max_link_count)
@@ -1628,7 +1628,7 @@ KResult Ext2FSInode::increment_link_count()
 
 KResult Ext2FSInode::decrement_link_count()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return EROFS;
     VERIFY(m_raw_inode.i_links_count);
@@ -1641,21 +1641,21 @@ KResult Ext2FSInode::decrement_link_count()
 
 void Ext2FS::uncache_inode(InodeIndex index)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_inode_cache.remove(index);
 }
 
 KResultOr<size_t> Ext2FSInode::directory_entry_count() const
 {
     VERIFY(is_directory());
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     populate_lookup_cache();
     return m_lookup_cache.size();
 }
 
 KResult Ext2FSInode::chmod(mode_t mode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_raw_inode.i_mode == mode)
         return KSuccess;
     m_raw_inode.i_mode = mode;
@@ -1665,7 +1665,7 @@ KResult Ext2FSInode::chmod(mode_t mode)
 
 KResult Ext2FSInode::chown(uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_raw_inode.i_uid == uid && m_raw_inode.i_gid == gid)
         return KSuccess;
     m_raw_inode.i_uid = uid;
@@ -1676,7 +1676,7 @@ KResult Ext2FSInode::chown(uid_t uid, gid_t gid)
 
 KResult Ext2FSInode::truncate(u64 size)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (static_cast<u64>(m_raw_inode.i_size) == size)
         return KSuccess;
     if (auto result = resize(size); result.is_error())
@@ -1687,7 +1687,7 @@ KResult Ext2FSInode::truncate(u64 size)
 
 KResultOr<int> Ext2FSInode::get_block_address(int index)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     if (m_block_list.is_empty())
         m_block_list = compute_block_list();
@@ -1700,31 +1700,31 @@ KResultOr<int> Ext2FSInode::get_block_address(int index)
 
 unsigned Ext2FS::total_block_count() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return super_block().s_blocks_count;
 }
 
 unsigned Ext2FS::free_block_count() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return super_block().s_free_blocks_count;
 }
 
 unsigned Ext2FS::total_inode_count() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return super_block().s_inodes_count;
 }
 
 unsigned Ext2FS::free_inode_count() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return super_block().s_free_inodes_count;
 }
 
 KResult Ext2FS::prepare_to_unmount() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     for (auto& it : m_inode_cache) {
         if (it.value->ref_count() > 1)

@@ -74,7 +74,7 @@ KResultOr<NonnullRefPtr<FileDescription>> FIFO::open_direction_blocking(FIFO::Di
 FIFO::FIFO(uid_t uid)
     : m_uid(uid)
 {
-    LOCKER(all_fifos().lock());
+    Locker locker(all_fifos().lock());
     all_fifos().resource().set(this);
     m_fifo_id = ++s_next_fifo_id;
 
@@ -86,7 +86,7 @@ FIFO::FIFO(uid_t uid)
 
 FIFO::~FIFO()
 {
-    LOCKER(all_fifos().lock());
+    Locker locker(all_fifos().lock());
     all_fifos().resource().remove(this);
 }
 
@@ -99,7 +99,7 @@ Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBloc
 
 KResult FileDescription::stat(::stat& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     // FIXME: This is a little awkward, why can't we always forward to File::stat()?
     if (m_inode)
         return metadata().stat(buffer);
@@ -108,7 +108,7 @@ KResult FileDescription::stat(::stat& buffer)
 
 KResultOr<off_t> FileDescription::seek(off_t offset, int whence)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!m_file->is_seekable())
         return ESPIPE;
 
@@ -147,7 +147,7 @@ KResultOr<off_t> FileDescription::seek(off_t offset, int whence)
 
 KResultOr<size_t> FileDescription::read(UserOrKernelBuffer& buffer, size_t count)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (Checked<off_t>::addition_would_overflow(m_current_offset, count))
         return EOVERFLOW;
     auto nread_or_error = m_file->read(*this, offset(), buffer, count);
@@ -161,7 +161,7 @@ KResultOr<size_t> FileDescription::read(UserOrKernelBuffer& buffer, size_t count
 
 KResultOr<size_t> FileDescription::write(const UserOrKernelBuffer& data, size_t size)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (Checked<off_t>::addition_would_overflow(m_current_offset, size))
         return EOVERFLOW;
     auto nwritten_or_error = m_file->write(*this, offset(), data, size);
@@ -193,7 +193,7 @@ KResultOr<NonnullOwnPtr<KBuffer>> FileDescription::read_entire_file()
 
 ssize_t FileDescription::get_dir_entries(UserOrKernelBuffer& buffer, ssize_t size)
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
     if (!is_directory())
         return -ENOTDIR;
 
@@ -309,13 +309,13 @@ InodeMetadata FileDescription::metadata() const
 
 KResultOr<Region*> FileDescription::mmap(Process& process, const Range& range, u64 offset, int prot, bool shared)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_file->mmap(process, *this, range, offset, prot, shared);
 }
 
 KResult FileDescription::truncate(u64 length)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_file->truncate(length);
 }
 
@@ -352,7 +352,7 @@ const Socket* FileDescription::socket() const
 
 void FileDescription::set_file_flags(u32 flags)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_is_blocking = !(flags & O_NONBLOCK);
     m_should_append = flags & O_APPEND;
     m_direct = flags & O_DIRECT;
@@ -361,13 +361,13 @@ void FileDescription::set_file_flags(u32 flags)
 
 KResult FileDescription::chmod(mode_t mode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_file->chmod(*this, mode);
 }
 
 KResult FileDescription::chown(uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_file->chown(*this, uid, gid);
 }
 
@@ -112,7 +112,7 @@ Inode::~Inode()
 
 void Inode::will_be_destroyed()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_metadata_dirty)
         flush_metadata();
 }
@@ -144,13 +144,13 @@ KResult Inode::decrement_link_count()
 
 void Inode::set_shared_vmobject(SharedInodeVMObject& vmobject)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_shared_vmobject = vmobject;
 }
 
 bool Inode::bind_socket(LocalSocket& socket)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_socket)
         return false;
     m_socket = socket;
@@ -159,7 +159,7 @@ bool Inode::bind_socket(LocalSocket& socket)
 
 bool Inode::unbind_socket()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!m_socket)
         return false;
     m_socket = nullptr;
@@ -168,21 +168,21 @@ bool Inode::unbind_socket()
 
 void Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!m_watchers.contains(&watcher));
     m_watchers.set(&watcher);
 }
 
 void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_watchers.contains(&watcher));
     m_watchers.remove(&watcher);
 }
 
 NonnullRefPtr<FIFO> Inode::fifo()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(metadata().is_fifo());
 
     // FIXME: Release m_fifo when it is closed by all readers and writers
@@ -195,7 +195,7 @@ NonnullRefPtr<FIFO> Inode::fifo()
 
 void Inode::set_metadata_dirty(bool metadata_dirty)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     if (metadata_dirty) {
         // Sanity check.
@@ -217,7 +217,7 @@ void Inode::set_metadata_dirty(bool metadata_dirty)
 
 void Inode::did_add_child(const InodeIdentifier& child_id)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     for (auto& watcher : m_watchers) {
         watcher->notify_child_added({}, child_id);
     }
@@ -225,7 +225,7 @@ void Inode::did_add_child(const InodeIdentifier& child_id)
 
 void Inode::did_remove_child(const InodeIdentifier& child_id)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     for (auto& watcher : m_watchers) {
         watcher->notify_child_removed({}, child_id);
     }
@@ -235,7 +235,7 @@ KResult Inode::prepare_to_write_data()
 {
     // FIXME: It's a poor design that filesystems are expected to call this before writing out data.
     // We should funnel everything through an interface at the VFS layer so this can happen from a single place.
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (fs().is_readonly())
         return EROFS;
     auto metadata = this->metadata();
@@ -248,13 +248,13 @@ KResult Inode::prepare_to_write_data()
 
 RefPtr<SharedInodeVMObject> Inode::shared_vmobject() const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_shared_vmobject.strong_ref();
 }
 
 bool Inode::is_shared_vmobject(const SharedInodeVMObject& other) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     return m_shared_vmobject.unsafe_ptr() == &other;
 }
 
@@ -39,7 +39,7 @@ bool InodeWatcher::can_write(const FileDescription&, size_t) const
 
 KResultOr<size_t> InodeWatcher::read(FileDescription&, u64, UserOrKernelBuffer& buffer, size_t buffer_size)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!m_queue.is_empty() || !m_inode);
 
     if (!m_inode)
@@ -76,21 +76,21 @@ String InodeWatcher::absolute_path(const FileDescription&) const
 
 void InodeWatcher::notify_inode_event(Badge<Inode>, InodeWatcherEvent::Type event_type)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_queue.enqueue({ event_type });
     evaluate_block_conditions();
 }
 
 void InodeWatcher::notify_child_added(Badge<Inode>, const InodeIdentifier& child_id)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_queue.enqueue({ InodeWatcherEvent::Type::ChildAdded, child_id.index().value() });
     evaluate_block_conditions();
 }
 
 void InodeWatcher::notify_child_removed(Badge<Inode>, const InodeIdentifier& child_id)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     m_queue.enqueue({ InodeWatcherEvent::Type::ChildRemoved, child_id.index().value() });
     evaluate_block_conditions();
 }
 
@@ -475,7 +475,7 @@ void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker)
 
 bool Plan9FS::is_complete(const ReceiveCompletion& completion)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (m_completions.contains(completion.tag)) {
         // If it's still in the map then it can't be complete
         VERIFY(!completion.completed);
@@ -495,12 +495,12 @@ KResult Plan9FS::post_message(Message& message, RefPtr<ReceiveCompletion> comple
     size_t size = buffer.size();
     auto& description = file_description();
 
-    LOCKER(m_send_lock);
+    Locker locker(m_send_lock);
 
     if (completion) {
         // Save the completion record *before* we send the message. This
         // ensures that it exists when the thread reads the response
-        LOCKER(m_lock);
+        Locker locker(m_lock);
         auto tag = completion->tag;
         m_completions.set(tag, completion.release_nonnull());
         // TODO: What if there is a collision? Do we need to wait until
@@ -569,7 +569,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
     if (result.is_error())
         return result;
 
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     auto optional_completion = m_completions.get(header.tag);
     if (optional_completion.has_value()) {
@@ -647,7 +647,7 @@ void Plan9FS::thread_main()
         auto result = read_and_dispatch_one_message();
         if (result.is_error()) {
            // If we fail to read, wake up everyone with an error.
-            LOCKER(m_lock);
+            Locker locker(m_lock);
 
             for (auto& it : m_completions) {
                it.value->result = result;
@@ -698,7 +698,7 @@ KResult Plan9FSInode::ensure_open_for_mode(int mode)
     u8 p9_mode = 0;
 
     {
-        LOCKER(m_lock);
+        Locker locker(m_lock);
 
         // If it's already open in this mode, we're done.
        if ((m_open_mode & mode) == mode)
 
@@ -503,7 +503,7 @@ static bool procfs$net_adapters(InodeIdentifier, KBufferBuilder& builder)
 static bool procfs$net_arp(InodeIdentifier, KBufferBuilder& builder)
 {
     JsonArraySerializer array { builder };
-    LOCKER(arp_table().lock(), Lock::Mode::Shared);
+    Locker locker(arp_table().lock(), Lock::Mode::Shared);
     for (auto& it : arp_table().resource()) {
         auto obj = array.add_object();
         obj.add("mac_address", it.value.to_string());
@@ -884,7 +884,7 @@ static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
     u8 buffer[2];
     auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
     {
-        LOCKER(lockable_bool->lock(), Lock::Mode::Shared);
+        Locker locker(lockable_bool->lock(), Lock::Mode::Shared);
         buffer[0] = lockable_bool->resource() ? '1' : '0';
     }
     buffer[1] = '\n';
@@ -914,7 +914,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
 
     auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
     {
-        LOCKER(lockable_bool->lock());
+        Locker locker(lockable_bool->lock());
         lockable_bool->resource() = value == '1';
     }
     variable.notify();
@@ -927,7 +927,7 @@ static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
     VERIFY(variable.type == SysVariable::Type::String);
 
     auto* lockable_string = reinterpret_cast<Lockable<String>*>(variable.address);
-    LOCKER(lockable_string->lock(), Lock::Mode::Shared);
+    Locker locker(lockable_string->lock(), Lock::Mode::Shared);
     builder.append_bytes(lockable_string->resource().bytes());
     return true;
 }
@@ -943,7 +943,7 @@ static ssize_t write_sys_string(InodeIdentifier inode_id, const UserOrKernelBuff
 
     {
         auto* lockable_string = reinterpret_cast<Lockable<String>*>(variable.address);
-        LOCKER(lockable_string->lock());
+        Locker locker(lockable_string->lock());
         lockable_string->resource() = move(string_copy);
     }
     variable.notify();
@@ -1012,7 +1012,7 @@ RefPtr<Inode> ProcFS::get_inode(InodeIdentifier inode_id) const
     if (inode_id == root_inode()->identifier())
         return m_root_inode;
 
-    LOCKER(m_inodes_lock);
+    Locker locker(m_inodes_lock);
     auto it = m_inodes.find(inode_id.index().value());
     if (it != m_inodes.end()) {
         // It's possible that the ProcFSInode ref count was dropped to 0 or
@@ -1037,7 +1037,7 @@ ProcFSInode::ProcFSInode(ProcFS& fs, InodeIndex index)
 
 ProcFSInode::~ProcFSInode()
 {
-    LOCKER(fs().m_inodes_lock);
+    Locker locker(fs().m_inodes_lock);
     auto it = fs().m_inodes.find(index().value());
     if (it != fs().m_inodes.end() && it->value == this)
         fs().m_inodes.remove(it);

@@ -39,7 +39,7 @@ NonnullRefPtr<Inode> TmpFS::root_inode() const
 
 void TmpFS::register_inode(TmpFSInode& inode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(inode.identifier().fsid() == fsid());
 
     auto index = inode.identifier().index();
@@ -48,7 +48,7 @@ void TmpFS::register_inode(TmpFSInode& inode)
 
 void TmpFS::unregister_inode(InodeIdentifier identifier)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(identifier.fsid() == fsid());
 
     m_inodes.remove(identifier.index());
@@ -56,14 +56,14 @@ void TmpFS::unregister_inode(InodeIdentifier identifier)
 
 unsigned TmpFS::next_inode_index()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     return m_next_inode_index++;
 }
 
 RefPtr<Inode> TmpFS::get_inode(InodeIdentifier identifier) const
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
     VERIFY(identifier.fsid() == fsid());
 
     auto it = m_inodes.find(identifier.index());
@@ -104,14 +104,14 @@ NonnullRefPtr<TmpFSInode> TmpFSInode::create_root(TmpFS& fs)
 
 InodeMetadata TmpFSInode::metadata() const
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
 
     return m_metadata;
 }
 
 KResult TmpFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
 
     if (!is_directory())
         return ENOTDIR;
@@ -128,7 +128,7 @@ KResult TmpFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntry
 
 ssize_t TmpFSInode::read_bytes(off_t offset, ssize_t size, UserOrKernelBuffer& buffer, FileDescription*) const
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
     VERIFY(!is_directory());
     VERIFY(size >= 0);
     VERIFY(offset >= 0);
@@ -149,7 +149,7 @@ ssize_t TmpFSInode::read_bytes(off_t offset, ssize_t size, UserOrKernelBuffer& b
 
 ssize_t TmpFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelBuffer& buffer, FileDescription*)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!is_directory());
     VERIFY(offset >= 0);
 
@@ -193,7 +193,7 @@ ssize_t TmpFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelBu
 
 RefPtr<Inode> TmpFSInode::lookup(StringView name)
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
     VERIFY(is_directory());
 
     if (name == ".")
@@ -209,7 +209,7 @@ RefPtr<Inode> TmpFSInode::lookup(StringView name)
 
 KResultOr<size_t> TmpFSInode::directory_entry_count() const
 {
-    LOCKER(m_lock, Lock::Mode::Shared);
+    Locker locker(m_lock, Lock::Mode::Shared);
     VERIFY(is_directory());
     return 2 + m_children.size();
 }
@@ -232,7 +232,7 @@ void TmpFSInode::flush_metadata()
 
 KResult TmpFSInode::chmod(mode_t mode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     m_metadata.mode = mode;
     notify_watchers();
@@ -241,7 +241,7 @@ KResult TmpFSInode::chmod(mode_t mode)
 
 KResult TmpFSInode::chown(uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     m_metadata.uid = uid;
     m_metadata.gid = gid;
@@ -251,7 +251,7 @@ KResult TmpFSInode::chown(uid_t uid, gid_t gid)
 
 KResultOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(const String& name, mode_t mode, dev_t dev, uid_t uid, gid_t gid)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     // TODO: Support creating devices on TmpFS.
     if (dev != 0)
@@ -276,7 +276,7 @@ KResultOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(const String& name, mod
 
 KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(is_directory());
     VERIFY(child.fsid() == fsid());
 
@@ -290,7 +290,7 @@ KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
 
 KResult TmpFSInode::remove_child(const StringView& name)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(is_directory());
 
     if (name == "." || name == "..")
@@ -307,7 +307,7 @@ KResult TmpFSInode::remove_child(const StringView& name)
 
 KResult TmpFSInode::truncate(u64 size)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(!is_directory());
 
     if (size == 0)
@@ -337,7 +337,7 @@ KResult TmpFSInode::truncate(u64 size)
 
 int TmpFSInode::set_atime(time_t time)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     m_metadata.atime = time;
     set_metadata_dirty(true);
@@ -347,7 +347,7 @@ int TmpFSInode::set_atime(time_t time)
 
 int TmpFSInode::set_ctime(time_t time)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     m_metadata.ctime = time;
     notify_watchers();
@@ -356,7 +356,7 @@ int TmpFSInode::set_ctime(time_t time)
 
 int TmpFSInode::set_mtime(time_t time)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     m_metadata.mtime = time;
     notify_watchers();

@@ -50,7 +50,7 @@ InodeIdentifier VFS::root_inode_id() const
 
 KResult VFS::mount(FS& file_system, Custody& mount_point, int flags)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     auto& inode = mount_point.inode();
     dbgln("VFS: Mounting {} at {} (inode: {}) with flags {}",
@@ -66,7 +66,7 @@ KResult VFS::mount(FS& file_system, Custody& mount_point, int flags)
 
 KResult VFS::bind_mount(Custody& source, Custody& mount_point, int flags)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     dbgln("VFS: Bind-mounting {} at {}", source.absolute_path(), mount_point.absolute_path());
     // FIXME: check that this is not already a mount point
@@ -77,7 +77,7 @@ KResult VFS::bind_mount(Custody& source, Custody& mount_point, int flags)
 
 KResult VFS::remount(Custody& mount_point, int new_flags)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
 
     dbgln("VFS: Remounting {}", mount_point.absolute_path());
 
@@ -91,7 +91,7 @@ KResult VFS::remount(Custody& mount_point, int new_flags)
 
 KResult VFS::unmount(Inode& guest_inode)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     dbgln("VFS: unmount called with inode {}", guest_inode.identifier());
 
     for (size_t i = 0; i < m_mounts.size(); ++i) {