
Kernel: Protect Inode flock list with spinlock instead of mutex

Andreas Kling 2022-02-03 17:28:45 +01:00
parent a81aebfd6e
commit e7dc9f71b8
2 changed files with 45 additions and 48 deletions
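
The pattern this commit adopts: instead of taking m_inode_lock around every access to the flock Vector, the Vector itself is wrapped in SpinlockProtected, so the only way to reach it is through a with() callback that runs while the lock is held. A minimal userspace sketch of that access pattern follows — it assumes std::mutex in place of the kernel spinlock, and LockProtected is a hypothetical stand-in for AK's SpinlockProtected, not the kernel implementation:

// Minimal sketch of the locked-access pattern, assuming std::mutex
// stands in for a kernel spinlock. LockProtected/with() are illustrative
// names, not AK's actual SpinlockProtected class.
#include <mutex>
#include <vector>

template<typename T>
class LockProtected {
public:
    // The only way to reach the value: run the callback with the lock held.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

struct Flock {
    long start { 0 };
    long len { 0 };
};

int main()
{
    LockProtected<std::vector<Flock>> flocks;

    // Mutations happen entirely inside the critical section.
    flocks.with([](auto& list) { list.push_back({ 0, 4096 }); });

    // Reads too; the lambda's return value is passed through by with().
    return flocks.with([](auto& list) { return list.size() == 1 ? 0 : 1; });
}

Because the value is a private member and with() is the sole accessor, forgetting to take the lock becomes a compile error rather than a latent race.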

Kernel/FileSystem/Inode.cpp

@@ -289,17 +289,15 @@ ErrorOr<void> Inode::can_apply_flock(OpenFileDescription const& description, flo
 {
     VERIFY(new_lock.l_whence == SEEK_SET);
 
-    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
+    return m_flocks.with([&](auto& flocks) -> ErrorOr<void> {
     if (new_lock.l_type == F_UNLCK) {
-        for (auto const& lock : m_flocks) {
+        for (auto const& lock : flocks) {
             if (&description == lock.owner && lock.start == new_lock.l_start && lock.len == new_lock.l_len)
                 return {};
         }
         return EINVAL;
     }
 
-    for (auto const& lock : m_flocks) {
+    for (auto const& lock : flocks) {
         if (!range_overlap(lock.start, lock.len, new_lock.l_start, new_lock.l_len))
             continue;
@@ -310,6 +308,7 @@ ErrorOr<void> Inode::can_apply_flock(OpenFileDescription const& description, flo
             return EAGAIN;
     }
     return {};
+    });
 }
ErrorOr<void> Inode::apply_flock(Process const& process, OpenFileDescription const& description, Userspace<flock const*> input_lock)
@@ -317,22 +316,22 @@ ErrorOr<void> Inode::apply_flock(Process const& process, OpenFileDescription con
     auto new_lock = TRY(copy_typed_from_user(input_lock));
     TRY(normalize_flock(description, new_lock));
 
-    MutexLocker locker(m_inode_lock);
+    return m_flocks.with([&](auto& flocks) -> ErrorOr<void> {
     TRY(can_apply_flock(description, new_lock));
     if (new_lock.l_type == F_UNLCK) {
-        for (size_t i = 0; i < m_flocks.size(); ++i) {
-            if (&description == m_flocks[i].owner && m_flocks[i].start == new_lock.l_start && m_flocks[i].len == new_lock.l_len) {
-                m_flocks.remove(i);
+        for (size_t i = 0; i < flocks.size(); ++i) {
+            if (&description == flocks[i].owner && flocks[i].start == new_lock.l_start && flocks[i].len == new_lock.l_len) {
+                flocks.remove(i);
                 return {};
             }
         }
         return EINVAL;
     }
 
-    TRY(m_flocks.try_append(Flock { new_lock.l_start, new_lock.l_len, &description, process.pid().value(), new_lock.l_type }));
+    TRY(flocks.try_append(Flock { new_lock.l_start, new_lock.l_len, &description, process.pid().value(), new_lock.l_type }));
     return {};
+    });
 }
ErrorOr<void> Inode::get_flock(OpenFileDescription const& description, Userspace<flock*> reference_lock) const
@@ -341,9 +340,8 @@ ErrorOr<void> Inode::get_flock(OpenFileDescription const& description, Userspace
     TRY(copy_from_user(&lookup, reference_lock));
     TRY(normalize_flock(description, lookup));
 
-    MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
-    for (auto const& lock : m_flocks) {
+    return m_flocks.with([&](auto& flocks) {
+        for (auto const& lock : flocks) {
         if (!range_overlap(lock.start, lock.len, lookup.l_start, lookup.l_len))
             continue;
@@ -355,17 +353,16 @@ ErrorOr<void> Inode::get_flock(OpenFileDescription const& description, Userspace
     lookup.l_type = F_UNLCK;
     return copy_to_user(reference_lock, &lookup);
+    });
 }
 
 void Inode::remove_flocks_for_description(OpenFileDescription const& description)
 {
-    MutexLocker locker(m_inode_lock);
-    for (size_t i = 0; i < m_flocks.size(); ++i) {
-        if (&description == m_flocks[i].owner)
-            m_flocks.remove(i--);
-    }
+    m_flocks.with([&](auto& flocks) {
+        flocks.remove_all_matching([&](auto& entry) { return entry.owner == &description; });
+    });
 }
 
 bool Inode::has_watchers() const
 {
     return !m_watchers.with([&](auto& watchers) { return watchers.is_empty(); });
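
Note that remove_flocks_for_description() also trades the index-fixup loop (with its easy-to-get-wrong remove(i--)) for Vector::remove_all_matching(), which erases every element the predicate accepts in a single pass. A rough standard-library analogue, assuming C++20's std::erase_if and simplified stand-in types:

// Rough standard-library analogue of the remove_all_matching() call,
// assuming C++20. The types here are simplified stand-ins, not kernel types.
#include <cassert>
#include <vector>

struct Flock {
    void const* owner { nullptr };
};

void remove_flocks_for_description(std::vector<Flock>& flocks, void const* description)
{
    // Erase every lock owned by this description in one pass,
    // with no manual index bookkeeping.
    std::erase_if(flocks, [&](auto& entry) { return entry.owner == description; });
}

int main()
{
    int a = 0, b = 0;
    std::vector<Flock> flocks { { &a }, { &b }, { &a } };
    remove_flocks_for_description(flocks, &a);
    assert(flocks.size() == 1);
}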

Kernel/FileSystem/Inode.h

@@ -131,7 +131,7 @@ private:
        short type;
    };

-   Vector<Flock> m_flocks;
+   SpinlockProtected<Vector<Flock>> m_flocks;

public:
    using AllInstancesList = IntrusiveList<&Inode::m_inode_list_node>;