Mirror of https://github.com/RGBCube/serenity (synced 2025-05-31 10:28:10 +00:00)

Kernel: Promote various integers to 64 bits in storage layer

parent f3a3a63b68
commit fdb5367da1

8 changed files with 29 additions and 45 deletions
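The hunks below widen 32-bit byte offsets and counts (u32, unsigned, size_t) to 64 bits: block_size(), logical_block_size() and m_logical_block_size become u64, so expressions such as index.value() * block_size() + offset are evaluated in 64-bit arithmetic and the call sites can switch to auto. With the old 32-bit types the multiplication wraps once the target byte lies past 4 GiB, and the following seek() lands at the wrong position. A minimal standalone sketch of that wrap-around (illustrative only, not SerenityOS code; the block size and index are made-up values):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Made-up numbers: block 3'000'000 of a 4 KiB-block device starts at
        // byte 12'288'000'000 (~11.4 GiB), which does not fit in 32 bits.
        uint32_t block_size = 4096;
        uint32_t index = 3'000'000;

        uint32_t narrow_offset = index * block_size;         // wraps modulo 2^32
        uint64_t wide_offset = uint64_t(index) * block_size; // correct byte offset

        std::printf("32-bit: %u\n", narrow_offset);                     // 3698065408 (wrong)
        std::printf("64-bit: %llu\n", (unsigned long long)wide_offset); // 12288000000
        return 0;
    }

Because only one operand has to be 64-bit for the usual arithmetic conversions to widen the whole expression, promoting the declared return types is enough; that is why most call sites in the diff simply change u32/size_t to auto.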
@@ -123,7 +123,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
 
     if (!allow_cache) {
         flush_specific_block_if_needed(index);
-        u32 base_offset = index.value() * block_size() + offset;
+        auto base_offset = index.value() * block_size() + offset;
         auto seek_result = file_description().seek(base_offset, SEEK_SET);
         if (seek_result.is_error())
             return seek_result.error();
@@ -152,7 +152,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
 bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 {
     Locker locker(m_lock);
-    u32 base_offset = index.value() * m_logical_block_size;
+    auto base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
     auto nread = file_description().read(buffer, m_logical_block_size);
@@ -164,7 +164,7 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 {
     Locker locker(m_lock);
-    size_t base_offset = index.value() * m_logical_block_size;
+    auto base_offset = index.value() * m_logical_block_size;
     auto seek_result = file_description().seek(base_offset, SEEK_SET);
     VERIFY(!seek_result.is_error());
     auto nwritten = file_description().write(buffer, m_logical_block_size);
@@ -177,7 +177,7 @@ bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelB
 {
     Locker locker(m_lock);
     auto current = buffer;
-    for (unsigned block = index.value(); block < (index.value() + count); block++) {
+    for (auto block = index.value(); block < (index.value() + count); block++) {
         if (!raw_read(BlockIndex { block }, current))
             return false;
         current = current.offset(logical_block_size());
@@ -189,7 +189,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr
 {
     Locker locker(m_lock);
     auto current = buffer;
-    for (unsigned block = index.value(); block < (index.value() + count); block++) {
+    for (auto block = index.value(); block < (index.value() + count); block++) {
         if (!raw_write(block, current))
             return false;
         current = current.offset(logical_block_size());
@@ -297,7 +297,7 @@ void BlockBasedFS::flush_writes_impl()
         return;
     u32 count = 0;
     cache().for_each_dirty_entry([&](CacheEntry& entry) {
-        u32 base_offset = entry.block_index.value() * block_size();
+        auto base_offset = entry.block_index.value() * block_size();
         auto seek_result = file_description().seek(base_offset, SEEK_SET);
         VERIFY(!seek_result.is_error());
         // FIXME: Should this error path be surfaced somehow?

@@ -16,7 +16,7 @@ public:
 
     virtual ~BlockBasedFS() override;
 
-    size_t logical_block_size() const { return m_logical_block_size; };
+    u64 logical_block_size() const { return m_logical_block_size; };
 
     virtual void flush_writes() override;
     void flush_writes_impl();
@@ -36,7 +36,7 @@ protected:
     KResult write_block(BlockIndex, const UserOrKernelBuffer&, size_t count, size_t offset = 0, bool allow_cache = true);
     KResult write_blocks(BlockIndex, unsigned count, const UserOrKernelBuffer&, bool allow_cache = true);
 
-    size_t m_logical_block_size { 512 };
+    u64 m_logical_block_size { 512 };
 
 private:
     DiskCache& cache() const;

@@ -124,7 +124,7 @@ bool Ext2FS::initialize()
         return false;
     }
 
-    unsigned blocks_to_read = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
+    auto blocks_to_read = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
     BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
     m_cached_group_descriptor_table = KBuffer::try_create_with_size(block_size() * blocks_to_read, Region::Access::Read | Region::Access::Write, "Ext2FS: Block group descriptors");
     if (!m_cached_group_descriptor_table) {
@@ -170,9 +170,9 @@ bool Ext2FS::find_block_containing_inode(InodeIndex inode, BlockIndex& block_ind
 
     auto& bgd = group_descriptor(group_index_from_inode(inode));
 
-    offset = ((inode.value() - 1) % inodes_per_group()) * inode_size();
-    block_index = bgd.bg_inode_table + (offset >> EXT2_BLOCK_SIZE_BITS(&super_block));
-    offset &= block_size() - 1;
+    u64 full_offset = ((inode.value() - 1) % inodes_per_group()) * inode_size();
+    block_index = bgd.bg_inode_table + (full_offset >> EXT2_BLOCK_SIZE_BITS(&super_block));
+    offset = full_offset & (block_size() - 1);
 
     return true;
 }
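In the hunk above, the inode's byte position inside the inode table is now computed into a separate 64-bit full_offset before being split into a block number (the shift by EXT2_BLOCK_SIZE_BITS) and a sub-block remainder. Since inodes_per_group() and inode_size() now return u64, assigning the product straight into the 32-bit offset out-parameter, as the old code did, would truncate it before the shift; only the remainder is guaranteed to fit, because it is always smaller than block_size(). A small standalone sketch of that split (illustrative only; the struct, helper and numbers below are not from the kernel):

    #include <cstdint>

    // Split a 64-bit byte position into a block number and an offset within
    // that block. block_size must be a power of two, as it is for ext2.
    struct BlockPosition {
        uint64_t block;
        uint32_t offset_in_block;
    };

    static BlockPosition split_offset(uint64_t full_offset, uint64_t block_size)
    {
        return {
            full_offset / block_size,                 // which block the byte lives in
            uint32_t(full_offset & (block_size - 1)), // remainder, always < block_size
        };
    }

    int main()
    {
        // Hypothetical position: byte 5'000'000'000 with 1 KiB blocks.
        auto pos = split_offset(5'000'000'000ULL, 1024);
        // pos.block == 4'882'812, pos.offset_in_block == 512
        return 0;
    }

For a power-of-two block size, full_offset & (block_size - 1) equals full_offset % block_size, which is the identity the masking in the kernel code relies on.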
@@ -679,8 +679,8 @@ void Ext2FS::free_inode(Ext2FSInode& inode)
 void Ext2FS::flush_block_group_descriptor_table()
 {
     Locker locker(m_lock);
-    unsigned blocks_to_write = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
-    unsigned first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
+    auto blocks_to_write = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
+    auto first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
     auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)block_group_descriptors());
     if (auto result = write_blocks(first_block_of_bgdt, blocks_to_write, buffer); result.is_error())
         dbgln("Ext2FS[{}]::flush_block_group_descriptor_table(): Failed to write blocks: {}", fsid(), result.error());
@@ -1257,21 +1257,21 @@ KResult Ext2FSInode::remove_child(const StringView& name)
     return KSuccess;
 }
 
-unsigned Ext2FS::inodes_per_block() const
+u64 Ext2FS::inodes_per_block() const
 {
     return EXT2_INODES_PER_BLOCK(&super_block());
 }
 
-unsigned Ext2FS::inodes_per_group() const
+u64 Ext2FS::inodes_per_group() const
 {
     return EXT2_INODES_PER_GROUP(&super_block());
 }
 
-unsigned Ext2FS::inode_size() const
+u64 Ext2FS::inode_size() const
 {
     return EXT2_INODE_SIZE(&super_block());
 }
-unsigned Ext2FS::blocks_per_group() const
+u64 Ext2FS::blocks_per_group() const
 {
     return EXT2_BLOCKS_PER_GROUP(&super_block());
 }

@@ -118,10 +118,10 @@ private:
     ext2_group_desc* block_group_descriptors() { return (ext2_group_desc*)m_cached_group_descriptor_table->data(); }
     const ext2_group_desc* block_group_descriptors() const { return (const ext2_group_desc*)m_cached_group_descriptor_table->data(); }
     void flush_block_group_descriptor_table();
-    unsigned inodes_per_block() const;
-    unsigned inodes_per_group() const;
-    unsigned blocks_per_group() const;
-    unsigned inode_size() const;
+    u64 inodes_per_block() const;
+    u64 inodes_per_group() const;
+    u64 blocks_per_group() const;
+    u64 inode_size() const;
 
     bool write_ext2_inode(InodeIndex, const ext2_inode&);
     bool find_block_containing_inode(InodeIndex, BlockIndex& block_index, unsigned& offset) const;
@@ -158,7 +158,7 @@ private:
 
     BlockListShape compute_block_list_shape(unsigned blocks) const;
 
-    unsigned m_block_group_count { 0 };
+    u64 m_block_group_count { 0 };
 
     mutable ext2_super_block m_super_block;
     mutable OwnPtr<KBuffer> m_cached_group_descriptor_table;

@@ -74,20 +74,4 @@ void FS::lock_all()
     }
 }
 
-void FS::set_block_size(size_t block_size)
-{
-    VERIFY(block_size > 0);
-    if (block_size == m_block_size)
-        return;
-    m_block_size = block_size;
-}
-
-void FS::set_fragment_size(size_t fragment_size)
-{
-    VERIFY(fragment_size > 0);
-    if (fragment_size == m_fragment_size)
-        return;
-    m_fragment_size = fragment_size;
-}
-
 }

@@ -60,7 +60,7 @@ public:
 
     virtual void flush_writes() { }
 
-    size_t block_size() const { return m_block_size; }
+    u64 block_size() const { return m_block_size; }
     size_t fragment_size() const { return m_fragment_size; }
 
     virtual bool is_file_backed() const { return false; }
@@ -71,14 +71,14 @@ public:
 protected:
     FS();
 
-    void set_block_size(size_t);
-    void set_fragment_size(size_t);
+    void set_block_size(u64 size) { m_block_size = size; }
+    void set_fragment_size(size_t size) { m_fragment_size = size; }
 
     mutable Lock m_lock { "FS" };
 
 private:
     unsigned m_fsid { 0 };
-    size_t m_block_size { 0 };
+    u64 m_block_size { 0 };
     size_t m_fragment_size { 0 };
     bool m_readonly { false };
 };

@@ -16,7 +16,7 @@ namespace Kernel {
 class FS;
 struct InodeMetadata;
 
-TYPEDEF_DISTINCT_ORDERED_ID(unsigned, InodeIndex);
+TYPEDEF_DISTINCT_ORDERED_ID(u64, InodeIndex);
 
 class InodeIdentifier {
 public:

@@ -155,7 +155,7 @@ protected:
     RefPtr<StorageDevice> m_slave;
 
     RefPtr<AsyncBlockDeviceRequest> m_current_request;
-    size_t m_current_request_block_index { 0 };
+    u64 m_current_request_block_index { 0 };
     bool m_current_request_flushing_cache { false };
     SpinLock<u8> m_request_lock;
     Lock m_lock { "IDEChannel" };