
Kernel: Remove the now defunct LOCKER(..) macro.

Authored by Brian Gianforcaro, 2021-04-24 15:27:32 -07:00; committed by Andreas Kling
parent 0d5827f865
commit 8d6e9fad40
31 changed files with 196 additions and 198 deletions
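The change is purely mechanical: every call site that used the LOCKER(..) macro now constructs the RAII lock guard explicitly. Below is a minimal sketch of the two styles; the simplified Lock/Locker stand-ins and the macro form shown are assumptions for illustration, not the kernel's exact definitions from its Lock header.

// Simplified stand-ins for the kernel's Lock/Locker types (illustration only).
class Lock {
public:
    void lock() { }   // acquire (stubbed out for the sketch)
    void unlock() { } // release (stubbed out for the sketch)
};

class Locker {
public:
    explicit Locker(Lock& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquired on construction
    }
    ~Locker() { m_lock.unlock(); } // released when the guard goes out of scope

private:
    Lock& m_lock;
};

// Approximate form of the removed macro: it manufactured a guard variable
// named `locker` behind the caller's back.
#define LOCKER(...) Locker locker(__VA_ARGS__)

void before(Lock& m_lock)
{
    LOCKER(m_lock); // expands to roughly: Locker locker(m_lock);
}

void after(Lock& m_lock)
{
    Locker locker(m_lock); // the same RAII guard, now spelled out at the call site
}

Spelling the guard out keeps the lock acquisition and the name of the guard visible at each call site, which is exactly the substitution repeated throughout the diff below.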


@@ -116,7 +116,7 @@ BlockBasedFS::~BlockBasedFS()
 KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     VERIFY(offset + count <= block_size());
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);
@@ -151,7 +151,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
 bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     u32 base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
@@ -163,7 +163,7 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     size_t base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
@@ -175,7 +175,7 @@ bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto current = buffer;
     for (unsigned block = index.value(); block < (index.value() + count); block++) {
         if (!raw_read(BlockIndex { block }, current))
@@ -187,7 +187,7 @@ bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelB
 bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOrKernelBuffer& buffer)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     auto current = buffer;
     for (unsigned block = index.value(); block < (index.value() + count); block++) {
         if (!raw_write(block, current))
@@ -199,7 +199,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr
 KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
     for (unsigned i = 0; i < count; ++i) {
@@ -212,7 +212,7 @@ KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserO
 KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     VERIFY(offset + count <= block_size());
     dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);
@@ -250,7 +250,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
 KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     VERIFY(m_logical_block_size);
     if (!count)
         return EINVAL;
@@ -269,7 +269,7 @@ KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernel
 void BlockBasedFS::flush_specific_block_if_needed(BlockIndex index)
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!cache().is_dirty())
         return;
     Vector<CacheEntry*, 32> cleaned_entries;
@@ -292,7 +292,7 @@ void BlockBasedFS::flush_specific_block_if_needed(BlockIndex index)
 void BlockBasedFS::flush_writes_impl()
 {
-    LOCKER(m_lock);
+    Locker locker(m_lock);
     if (!cache().is_dirty())
         return;
     u32 count = 0;