mirror of
https://github.com/RGBCube/serenity
synced 2025-07-25 16:47:36 +00:00
Kernel: Implement flock downgrading
This commit makes it possible for a process to downgrade a file lock it holds from a write (exclusive) lock to a read (shared) lock. To do so, the process must specify the exact range of the existing lock, and must be the owner of that lock.
This commit is contained in:
parent
9b425b860c
commit
3275015786
2 changed files with 39 additions and 19 deletions
|
@ -298,7 +298,7 @@ static inline ErrorOr<void> normalize_flock(OpenFileDescription const& descripti
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Inode::can_apply_flock(flock const& new_lock) const
|
bool Inode::can_apply_flock(flock const& new_lock, Optional<OpenFileDescription const&> description) const
|
||||||
{
|
{
|
||||||
VERIFY(new_lock.l_whence == SEEK_SET);
|
VERIFY(new_lock.l_whence == SEEK_SET);
|
||||||
|
|
||||||
|
@ -310,8 +310,19 @@ bool Inode::can_apply_flock(flock const& new_lock) const
|
||||||
if (!range_overlap(lock.start, lock.len, new_lock.l_start, new_lock.l_len))
|
if (!range_overlap(lock.start, lock.len, new_lock.l_start, new_lock.l_len))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
// There are two cases where we can attempt downgrade:
|
||||||
|
//
|
||||||
|
// 1) We're the owner of this lock. The downgrade will immediately
|
||||||
|
// succeed.
|
||||||
|
// 2) We're not the owner of this lock. Our downgrade attempt will
|
||||||
|
// fail, and the thread will start blocking on an FlockBlocker.
|
||||||
|
//
|
||||||
|
// For the first case, we get the description from try_apply_flock
|
||||||
|
// below. For the second case, the check below would always be
|
||||||
|
// false, so there is no need to store the description in the
|
||||||
|
// blocker in the first place.
|
||||||
if (new_lock.l_type == F_RDLCK && lock.type == F_WRLCK)
|
if (new_lock.l_type == F_RDLCK && lock.type == F_WRLCK)
|
||||||
return false;
|
return description.has_value() && lock.owner == &description.value() && lock.start == new_lock.l_start && lock.len == new_lock.l_len;
|
||||||
|
|
||||||
if (new_lock.l_type == F_WRLCK)
|
if (new_lock.l_type == F_WRLCK)
|
||||||
return false;
|
return false;
|
||||||
|
@ -320,29 +331,38 @@ bool Inode::can_apply_flock(flock const& new_lock) const
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// Attempt to apply `new_lock` on behalf of `description` without blocking.
// Returns false if a conflicting lock held by another description is in the
// way (the caller may then block on the flock blocker set); returns true once
// the lock table has been updated.
ErrorOr<bool> Inode::try_apply_flock(Process const& process, OpenFileDescription const& description, flock const& new_lock)
{
    return m_flocks.with([&](auto& flocks) -> ErrorOr<bool> {
        // Pass our description along so that downgrading our own write lock
        // to a read lock is not mistaken for a conflict with ourselves.
        if (!can_apply_flock(new_lock, description))
            return false;

        // An unlock (F_UNLCK), or a downgrade of a write lock to a read lock
        // (F_WRLCK -> F_RDLCK), removes the existing entry first. This only
        // applies to a lock we own whose range matches exactly.
        bool removed_existing_lock = false;
        for (size_t index = 0; index < flocks.size(); ++index) {
            auto const& existing = flocks[index];

            bool wants_unlock = new_lock.l_type == F_UNLCK;
            bool wants_downgrade = new_lock.l_type == F_RDLCK && existing.type == F_WRLCK;
            if (!wants_unlock && !wants_downgrade)
                continue;

            bool owned_by_us = existing.owner == &description;
            bool range_matches_exactly = existing.start == new_lock.l_start && existing.len == new_lock.l_len;
            if (owned_by_us && range_matches_exactly) {
                flocks.remove(index);
                removed_existing_lock = true;
                break;
            }
        }

        // Anything other than a plain unlock installs the new (possibly
        // downgraded) lock.
        if (new_lock.l_type != F_UNLCK)
            TRY(flocks.try_append(Flock { new_lock.l_start, new_lock.l_len, &description, process.pid().value(), new_lock.l_type }));

        // Removing a lock may have satisfied the wake-up condition of
        // threads currently blocked on this inode's locks.
        if (removed_existing_lock)
            m_flock_blocker_set.unblock_all_blockers_whose_conditions_are_met();

        // Judging by the Linux implementation, unlocking a non-existent lock
        // also works.
        return true;
    });
}
|
||||||
|
|
|
@ -104,7 +104,7 @@ public:
|
||||||
|
|
||||||
ErrorOr<NonnullLockRefPtr<FIFO>> fifo();
|
ErrorOr<NonnullLockRefPtr<FIFO>> fifo();
|
||||||
|
|
||||||
bool can_apply_flock(flock const&) const;
|
bool can_apply_flock(flock const&, Optional<OpenFileDescription const&> = {}) const;
|
||||||
ErrorOr<void> apply_flock(Process const&, OpenFileDescription const&, Userspace<flock const*>, ShouldBlock);
|
ErrorOr<void> apply_flock(Process const&, OpenFileDescription const&, Userspace<flock const*>, ShouldBlock);
|
||||||
ErrorOr<void> get_flock(OpenFileDescription const&, Userspace<flock*>) const;
|
ErrorOr<void> get_flock(OpenFileDescription const&, Userspace<flock*>) const;
|
||||||
void remove_flocks_for_description(OpenFileDescription const&);
|
void remove_flocks_for_description(OpenFileDescription const&);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue