
Everywhere: Run clang-format

Author: Idan Horowitz, 2022-04-01 20:58:27 +03:00 (committed by Linus Groh)
parent 0376c127f6
commit 086969277e
1665 changed files with 8479 additions and 8479 deletions
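
Every hunk in this diff is the same mechanical change: clang-format moves cv-qualifiers to the right of the type they qualify ("east const"), with no behavioral difference. As a minimal sketch of what that means (illustrative only, not taken from the commit; the helper length_of is hypothetical, and it is an assumption that the project enforces this via clang-format's QualifierAlignment/QualifierOrder options):

#include <cstddef>

// "const char*" and "char const*" name the same type; this commit rewrites
// the first spelling into the second across the whole tree.
size_t length_of(const char* str); // before: west const
size_t length_of(char const* str); // after:  east const (same declaration)

size_t length_of(char const* str)
{
    size_t count = 0;
    while (str[count] != '\0')
        ++count;
    return count;
}

Note that the reordering never changes which entity is const: reading right-to-left, "char const*" is still a pointer to const char.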

@@ -245,7 +245,7 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
     if (!found_region)
         return regions;
     for (auto iter = m_regions.begin_from((*found_region)->vaddr().get()); !iter.is_end(); ++iter) {
-        const auto& iter_range = (*iter)->range();
+        auto const& iter_range = (*iter)->range();
         if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
             TRY(regions.try_append(*iter));
@@ -267,7 +267,7 @@ ErrorOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
+ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(Region const& source_region, VirtualRange const& desired_range)
 {
     VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
@@ -343,10 +343,10 @@ size_t AddressSpace::amount_dirty_private() const
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
     SpinlockLocker lock(m_lock);
-    HashTable<const InodeVMObject*> vmobjects;
+    HashTable<InodeVMObject const*> vmobjects;
     for (auto const& region : m_regions) {
         if (region->vmobject().is_inode())
-            TRY(vmobjects.try_set(&static_cast<const InodeVMObject&>(region->vmobject())));
+            TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region->vmobject())));
     }
     size_t amount = 0;
     for (auto& vmobject : vmobjects)

@@ -22,14 +22,14 @@ public:
     ~AddressSpace();
 
     PageDirectory& page_directory() { return *m_page_directory; }
-    const PageDirectory& page_directory() const { return *m_page_directory; }
+    PageDirectory const& page_directory() const { return *m_page_directory; }
 
     ErrorOr<Region*> add_region(NonnullOwnPtr<Region>);
 
     size_t region_count() const { return m_regions.size(); }
 
     RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() { return m_regions; }
-    const RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() const { return m_regions; }
+    RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> const& regions() const { return m_regions; }
 
     void dump_regions();

@@ -14,8 +14,8 @@ namespace Kernel::Memory {
 class MappedROM {
 public:
-    const u8* base() const { return region->vaddr().offset(offset).as_ptr(); }
-    const u8* end() const { return base() + size; }
+    u8 const* base() const { return region->vaddr().offset(offset).as_ptr(); }
+    u8 const* end() const { return base() + size; }
 
     OwnPtr<Region> region;
     size_t size { 0 };
     size_t offset { 0 };
@@ -33,7 +33,7 @@ public:
         return {};
     }
 
-    PhysicalAddress paddr_of(const u8* ptr) const { return paddr.offset(ptr - this->base()); }
+    PhysicalAddress paddr_of(u8 const* ptr) const { return paddr.offset(ptr - this->base()); }
 };
 
 }

@@ -672,7 +672,7 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register
     // to avoid excessive spinlock recursion in this extremely common path.
     SpinlockLocker lock(space.get_lock());
 
-    auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
+    auto unlock_and_handle_crash = [&lock, &regs](char const* description, int signal) {
         lock.unlock();
         handle_crash(regs, description, signal);
     };

@@ -50,7 +50,7 @@ public:
     VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
 
     AddressSpace* address_space() { return m_space; }
-    const AddressSpace* address_space() const { return m_space; }
+    AddressSpace const* address_space() const { return m_space; }
 
     void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }

@@ -23,7 +23,7 @@ RingBuffer::RingBuffer(NonnullOwnPtr<Memory::Region> region, size_t capacity)
 {
 }
 
-bool RingBuffer::copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied)
+bool RingBuffer::copy_data_in(UserOrKernelBuffer const& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied)
 {
     size_t start_of_free_area = (m_start_of_used + m_num_used_bytes) % m_capacity_in_bytes;
     bytes_copied = min(m_capacity_in_bytes - m_num_used_bytes, min(m_capacity_in_bytes - start_of_free_area, length));

@@ -16,7 +16,7 @@ public:
     static ErrorOr<NonnullOwnPtr<RingBuffer>> try_create(StringView region_name, size_t capacity);
 
     bool has_space() const { return m_num_used_bytes < m_capacity_in_bytes; }
-    bool copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied);
+    bool copy_data_in(UserOrKernelBuffer const& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied);
     ErrorOr<size_t> copy_data_out(size_t size, UserOrKernelBuffer& buffer) const;
     ErrorOr<PhysicalAddress> reserve_space(size_t size);
     void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);

@@ -19,7 +19,7 @@ namespace Kernel::Memory {
 class ScatterGatherList : public RefCounted<ScatterGatherList> {
 public:
     static RefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
-    const VMObject& vmobject() const { return m_vm_object; }
+    VMObject const& vmobject() const { return m_vm_object; }
     VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
     size_t scatters_count() const { return m_vm_object->physical_pages().size(); }