
Everywhere: Run clang-format

Linus Groh 2022-10-17 00:06:11 +02:00
parent 8639d8bc21
commit d26aabff04
140 changed files with 1202 additions and 723 deletions
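
This is the tree-wide mechanical reformat for a clang-format upgrade. Two changes dominate the diff: trailing requires-clauses move onto their own line below the declaration, and cv-qualifiers are respelled to the right of what they qualify, so "const T*" becomes "T const*" and "volatile T" becomes "T volatile". As a minimal sketch (the exact contents of the repository's .clang-format may differ), formatter options along these lines produce both changes:

    # requires-clauses are broken onto their own line
    RequiresClausePosition: OwnLine
    # qualifiers are normalized to "east const"/"east volatile"
    QualifierAlignment: Custom
    QualifierOrder: ['static', 'inline', 'type', 'const', 'volatile']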

@@ -54,7 +54,8 @@ public:
ConsoleDevice& console_device();
template<typename DeviceType, typename... Args>
static inline ErrorOr<NonnullLockRefPtr<DeviceType>> try_create_device(Args&&... args) requires(requires(Args... args) { DeviceType::try_create(args...); })
static inline ErrorOr<NonnullLockRefPtr<DeviceType>> try_create_device(Args&&... args)
requires(requires(Args... args) { DeviceType::try_create(args...); })
{
auto device = TRY(DeviceType::try_create(forward<Args>(args)...));
device->after_inserting();

@@ -76,7 +76,7 @@ UNMAP_AFTER_INIT ErrorOr<void> VMWareGraphicsAdapter::initialize_fifo_registers(
return Error::from_errno(ENOTSUP);
}
m_fifo_registers = TRY(Memory::map_typed<volatile VMWareDisplayFIFORegisters>(fifo_physical_address, fifo_size, Memory::Region::Access::ReadWrite));
m_fifo_registers = TRY(Memory::map_typed<VMWareDisplayFIFORegisters volatile>(fifo_physical_address, fifo_size, Memory::Region::Access::ReadWrite));
m_fifo_registers->start = 16;
m_fifo_registers->size = 16 + (10 * 1024);
m_fifo_registers->next_command = 16;

@@ -48,7 +48,7 @@ private:
VMWareGraphicsAdapter(PCI::DeviceIdentifier const&, NonnullOwnPtr<IOWindow> registers_io_window);
Memory::TypedMapping<volatile VMWareDisplayFIFORegisters> m_fifo_registers;
Memory::TypedMapping<VMWareDisplayFIFORegisters volatile> m_fifo_registers;
LockRefPtr<VMWareDisplayConnector> m_display_connector;
mutable NonnullOwnPtr<IOWindow> m_registers_io_window;
mutable Spinlock m_io_access_lock { LockRank::None };

@@ -13,17 +13,23 @@
#define KMALLOC_SCRUB_BYTE 0xbb
#define KFREE_SCRUB_BYTE 0xaa
#define MAKE_ALIGNED_ALLOCATED(type, alignment) \
public: \
[[nodiscard]] void* operator new(size_t) \
{ \
void* ptr = kmalloc_aligned(sizeof(type), alignment); \
VERIFY(ptr); \
return ptr; \
} \
[[nodiscard]] void* operator new(size_t, std::nothrow_t const&) noexcept { return kmalloc_aligned(sizeof(type), alignment); } \
void operator delete(void* ptr) noexcept { kfree_aligned(ptr); } \
\
#define MAKE_ALIGNED_ALLOCATED(type, alignment) \
public: \
[[nodiscard]] void* operator new(size_t) \
{ \
void* ptr = kmalloc_aligned(sizeof(type), alignment); \
VERIFY(ptr); \
return ptr; \
} \
[[nodiscard]] void* operator new(size_t, std::nothrow_t const&) noexcept \
{ \
return kmalloc_aligned(sizeof(type), alignment); \
} \
void operator delete(void* ptr) noexcept \
{ \
kfree_aligned(ptr); \
} \
\
private:
// The C++ standard specifies that the nothrow allocation tag should live in the std namespace.
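
The hunk above only re-wraps the macro: the single-line bodies of the nothrow operator new and operator delete are expanded to the multi-line brace style; the expansion itself is unchanged. For context, a hypothetical use of the macro (not part of this diff) gives a class aligned allocation:

    class DmaBuffer {
        MAKE_ALIGNED_ALLOCATED(DmaBuffer, 4096);

    public:
        // members that rely on page-aligned backing storage
    };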

@@ -53,7 +53,7 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64
return Error::from_errno(EOVERFLOW);
#endif
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<volatile u8>(m_memory_mapped_range->paddr.offset(offset), space_length, Memory::Region::Access::ReadWrite));
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<u8 volatile>(m_memory_mapped_range->paddr.offset(offset), space_length, Memory::Region::Access::ReadWrite));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(memory_mapped_range))));
}
@@ -110,7 +110,7 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_pci_device_bar(PCI::Addres
return Error::from_errno(EOVERFLOW);
if (pci_bar_space_type == PCI::BARSpaceType::Memory64BitSpace && Checked<u64>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<volatile u8>(PhysicalAddress(pci_bar_value & 0xfffffff0), space_length, Memory::Region::Access::ReadWrite));
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<u8 volatile>(PhysicalAddress(pci_bar_value & 0xfffffff0), space_length, Memory::Region::Access::ReadWrite));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(memory_mapped_range))));
}
@@ -131,7 +131,7 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_pci_device_bar(PCI::Device
return create_for_pci_device_bar(pci_device_identifier.address(), pci_bar, space_length);
}
IOWindow::IOWindow(NonnullOwnPtr<Memory::TypedMapping<volatile u8>> memory_mapped_range)
IOWindow::IOWindow(NonnullOwnPtr<Memory::TypedMapping<u8 volatile>> memory_mapped_range)
: m_space_type(SpaceType::Memory)
, m_memory_mapped_range(move(memory_mapped_range))
{

@@ -75,7 +75,7 @@ public:
#endif
private:
explicit IOWindow(NonnullOwnPtr<Memory::TypedMapping<volatile u8>>);
explicit IOWindow(NonnullOwnPtr<Memory::TypedMapping<u8 volatile>>);
u8 volatile* as_memory_address_pointer();
@@ -116,7 +116,7 @@ private:
// can cause problems with strict bare metal hardware. For example, some XHCI USB controllers
// might completely lock up because of an unaligned memory access to their registers.
VERIFY((start_offset % sizeof(T)) == 0);
data = *(volatile T*)(as_memory_address_pointer() + start_offset);
data = *(T volatile*)(as_memory_address_pointer() + start_offset);
}
template<typename T>
@@ -135,12 +135,12 @@ private:
// can cause problems with strict bare metal hardware. For example, some XHCI USB controllers
// might completely lock up because of an unaligned memory access to their registers.
VERIFY((start_offset % sizeof(T)) == 0);
*(volatile T*)(as_memory_address_pointer() + start_offset) = value;
*(T volatile*)(as_memory_address_pointer() + start_offset) = value;
}
SpaceType m_space_type { SpaceType::Memory };
OwnPtr<Memory::TypedMapping<volatile u8>> m_memory_mapped_range;
OwnPtr<Memory::TypedMapping<u8 volatile>> m_memory_mapped_range;
#if ARCH(I386) || ARCH(X86_64)
OwnPtr<IOAddressData> m_io_range;
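
Both accessors above follow the pattern this commit respells: offset a u8 volatile* base, then access it through a T volatile*. Distilled into a minimal standalone sketch (function name hypothetical, simplified from the diff):

    template<typename T>
    T mmio_read(u8 volatile* base, size_t start_offset)
    {
        // Registers must be accessed at naturally aligned offsets; an unaligned
        // access can lock up strict hardware such as some XHCI controllers.
        VERIFY((start_offset % sizeof(T)) == 0);
        return *(T volatile*)(base + start_offset);
    }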

@@ -129,12 +129,12 @@ public:
};
LockRefPtr() = default;
LockRefPtr(const T* ptr)
LockRefPtr(T const* ptr)
: m_bits(PtrTraits::as_bits(const_cast<T*>(ptr)))
{
ref_if_not_null(const_cast<T*>(ptr));
}
LockRefPtr(const T& object)
LockRefPtr(T const& object)
: m_bits(PtrTraits::as_bits(const_cast<T*>(&object)))
{
T* ptr = const_cast<T*>(&object);
@@ -156,18 +156,21 @@ public:
{
}
template<typename U>
ALWAYS_INLINE LockRefPtr(NonnullLockRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr(NonnullLockRefPtr<U> const& other)
requires(IsConvertible<U*, T*>)
: m_bits(PtrTraits::as_bits(const_cast<U*>(other.add_ref())))
{
}
template<typename U>
ALWAYS_INLINE LockRefPtr(NonnullLockRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr(NonnullLockRefPtr<U>&& other)
requires(IsConvertible<U*, T*>)
: m_bits(PtrTraits::as_bits(&other.leak_ref()))
{
VERIFY(!is_null());
}
template<typename U, typename P = LockRefPtrTraits<U>>
LockRefPtr(LockRefPtr<U, P>&& other) requires(IsConvertible<U*, T*>)
LockRefPtr(LockRefPtr<U, P>&& other)
requires(IsConvertible<U*, T*>)
: m_bits(PtrTraits::template convert_from<U, P>(other.leak_ref_raw()))
{
}
@@ -176,7 +179,8 @@ public:
{
}
template<typename U, typename P = LockRefPtrTraits<U>>
LockRefPtr(LockRefPtr<U, P> const& other) requires(IsConvertible<U*, T*>)
LockRefPtr(LockRefPtr<U, P> const& other)
requires(IsConvertible<U*, T*>)
: m_bits(other.add_ref_raw())
{
}
@@ -205,7 +209,8 @@ public:
}
template<typename U, typename P = LockRefPtrTraits<U>>
void swap(LockRefPtr<U, P>& other) requires(IsConvertible<U*, T*>)
void swap(LockRefPtr<U, P>& other)
requires(IsConvertible<U*, T*>)
{
// NOTE: swap is not atomic!
FlatPtr other_bits = P::exchange(other.m_bits, P::default_null_value);
@@ -221,14 +226,16 @@ public:
}
template<typename U, typename P = LockRefPtrTraits<U>>
ALWAYS_INLINE LockRefPtr& operator=(LockRefPtr<U, P>&& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr& operator=(LockRefPtr<U, P>&& other)
requires(IsConvertible<U*, T*>)
{
assign_raw(PtrTraits::template convert_from<U, P>(other.leak_ref_raw()));
return *this;
}
template<typename U>
ALWAYS_INLINE LockRefPtr& operator=(NonnullLockRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr& operator=(NonnullLockRefPtr<U>&& other)
requires(IsConvertible<U*, T*>)
{
assign_raw(PtrTraits::as_bits(&other.leak_ref()));
return *this;
@@ -241,7 +248,8 @@ public:
}
template<typename U>
ALWAYS_INLINE LockRefPtr& operator=(NonnullLockRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr& operator=(NonnullLockRefPtr<U> const& other)
requires(IsConvertible<U*, T*>)
{
assign_raw(PtrTraits::as_bits(other.add_ref()));
return *this;
@@ -255,20 +263,21 @@ public:
}
template<typename U>
ALWAYS_INLINE LockRefPtr& operator=(LockRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE LockRefPtr& operator=(LockRefPtr<U> const& other)
requires(IsConvertible<U*, T*>)
{
assign_raw(other.add_ref_raw());
return *this;
}
ALWAYS_INLINE LockRefPtr& operator=(const T* ptr)
ALWAYS_INLINE LockRefPtr& operator=(T const* ptr)
{
ref_if_not_null(const_cast<T*>(ptr));
assign_raw(PtrTraits::as_bits(const_cast<T*>(ptr)));
return *this;
}
ALWAYS_INLINE LockRefPtr& operator=(const T& object)
ALWAYS_INLINE LockRefPtr& operator=(T const& object)
{
const_cast<T&>(object).ref();
assign_raw(PtrTraits::as_bits(const_cast<T*>(&object)));
@@ -317,14 +326,14 @@ public:
}
ALWAYS_INLINE T* ptr() { return as_ptr(); }
ALWAYS_INLINE const T* ptr() const { return as_ptr(); }
ALWAYS_INLINE T const* ptr() const { return as_ptr(); }
ALWAYS_INLINE T* operator->()
{
return as_nonnull_ptr();
}
ALWAYS_INLINE const T* operator->() const
ALWAYS_INLINE T const* operator->() const
{
return as_nonnull_ptr();
}
@@ -334,12 +343,12 @@ public:
return *as_nonnull_ptr();
}
ALWAYS_INLINE const T& operator*() const
ALWAYS_INLINE T const& operator*() const
{
return *as_nonnull_ptr();
}
ALWAYS_INLINE operator const T*() const { return as_ptr(); }
ALWAYS_INLINE operator T const*() const { return as_ptr(); }
ALWAYS_INLINE operator T*() { return as_ptr(); }
ALWAYS_INLINE operator bool() { return !is_null(); }
@@ -353,8 +362,8 @@ public:
bool operator==(LockRefPtr& other) { return as_ptr() == other.as_ptr(); }
bool operator!=(LockRefPtr& other) { return as_ptr() != other.as_ptr(); }
bool operator==(const T* other) const { return as_ptr() == other; }
bool operator!=(const T* other) const { return as_ptr() != other; }
bool operator==(T const* other) const { return as_ptr() == other; }
bool operator!=(T const* other) const { return as_ptr() != other; }
bool operator==(T* other) { return as_ptr() == other; }
bool operator!=(T* other) { return as_ptr() != other; }
@@ -363,7 +372,7 @@ public:
template<typename U = T>
typename PtrTraits::NullType null_value() const
requires(IsSame<U, T> && !IsNullPointer<typename PtrTraits::NullType>)
requires(IsSame<U, T> && !IsNullPointer<typename PtrTraits::NullType>)
{
// make sure we are holding a null value
FlatPtr bits = m_bits.load(AK::MemoryOrder::memory_order_relaxed);
@@ -371,7 +380,8 @@ public:
return PtrTraits::to_null_value(bits);
}
template<typename U = T>
void set_null_value(typename PtrTraits::NullType value) requires(IsSame<U, T> && !IsNullPointer<typename PtrTraits::NullType>)
void set_null_value(typename PtrTraits::NullType value)
requires(IsSame<U, T> && !IsNullPointer<typename PtrTraits::NullType>)
{
// make sure that new null value would be interpreted as a null value
FlatPtr bits = PtrTraits::from_null_value(value);
@@ -445,17 +455,17 @@ private:
};
template<typename T>
struct Formatter<LockRefPtr<T>> : Formatter<const T*> {
struct Formatter<LockRefPtr<T>> : Formatter<T const*> {
ErrorOr<void> format(FormatBuilder& builder, LockRefPtr<T> const& value)
{
return Formatter<const T*>::format(builder, value.ptr());
return Formatter<T const*>::format(builder, value.ptr());
}
};
template<typename T>
struct Traits<LockRefPtr<T>> : public GenericTraits<LockRefPtr<T>> {
using PeekType = T*;
using ConstPeekType = const T*;
using ConstPeekType = T const*;
static unsigned hash(LockRefPtr<T> const& p) { return ptr_hash(p.ptr()); }
static bool equals(LockRefPtr<T> const& a, LockRefPtr<T> const& b) { return a.ptr() == b.ptr(); }
};
@@ -463,17 +473,18 @@ struct Traits<LockRefPtr<T>> : public GenericTraits<LockRefPtr<T>> {
template<typename T, typename U>
inline NonnullLockRefPtr<T> static_ptr_cast(NonnullLockRefPtr<U> const& ptr)
{
return NonnullLockRefPtr<T>(static_cast<const T&>(*ptr));
return NonnullLockRefPtr<T>(static_cast<T const&>(*ptr));
}
template<typename T, typename U, typename PtrTraits = LockRefPtrTraits<T>>
inline LockRefPtr<T> static_ptr_cast(LockRefPtr<U> const& ptr)
{
return LockRefPtr<T, PtrTraits>(static_cast<const T*>(ptr.ptr()));
return LockRefPtr<T, PtrTraits>(static_cast<T const*>(ptr.ptr()));
}
template<typename T, typename PtrTraitsT, typename U, typename PtrTraitsU>
inline void swap(LockRefPtr<T, PtrTraitsT>& a, LockRefPtr<U, PtrTraitsU>& b) requires(IsConvertible<U*, T*>)
inline void swap(LockRefPtr<T, PtrTraitsT>& a, LockRefPtr<U, PtrTraitsU>& b)
requires(IsConvertible<U*, T*>)
{
a.swap(b);
}

@@ -19,26 +19,30 @@ public:
LockWeakPtr() = default;
template<typename U>
LockWeakPtr(WeakPtr<U> const& other) requires(IsBaseOf<T, U>)
LockWeakPtr(WeakPtr<U> const& other)
requires(IsBaseOf<T, U>)
: m_link(other.m_link)
{
}
template<typename U>
LockWeakPtr(WeakPtr<U>&& other) requires(IsBaseOf<T, U>)
LockWeakPtr(WeakPtr<U>&& other)
requires(IsBaseOf<T, U>)
: m_link(other.take_link())
{
}
template<typename U>
LockWeakPtr& operator=(WeakPtr<U>&& other) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(WeakPtr<U>&& other)
requires(IsBaseOf<T, U>)
{
m_link = other.take_link();
return *this;
}
template<typename U>
LockWeakPtr& operator=(WeakPtr<U> const& other) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(WeakPtr<U> const& other)
requires(IsBaseOf<T, U>)
{
if ((void const*)this != (void const*)&other)
m_link = other.m_link;
@@ -52,20 +56,23 @@ public:
}
template<typename U>
LockWeakPtr(const U& object) requires(IsBaseOf<T, U>)
LockWeakPtr(U const& object)
requires(IsBaseOf<T, U>)
: m_link(object.template try_make_weak_ptr<U>().release_value_but_fixme_should_propagate_errors().take_link())
{
}
template<typename U>
LockWeakPtr(const U* object) requires(IsBaseOf<T, U>)
LockWeakPtr(U const* object)
requires(IsBaseOf<T, U>)
{
if (object)
m_link = object->template try_make_weak_ptr<U>().release_value_but_fixme_should_propagate_errors().take_link();
}
template<typename U>
LockWeakPtr(LockRefPtr<U> const& object) requires(IsBaseOf<T, U>)
LockWeakPtr(LockRefPtr<U> const& object)
requires(IsBaseOf<T, U>)
{
object.do_while_locked([&](U* obj) {
if (obj)
@@ -74,7 +81,8 @@ public:
}
template<typename U>
LockWeakPtr(NonnullLockRefPtr<U> const& object) requires(IsBaseOf<T, U>)
LockWeakPtr(NonnullLockRefPtr<U> const& object)
requires(IsBaseOf<T, U>)
{
object.do_while_locked([&](U* obj) {
if (obj)
@@ -83,14 +91,16 @@ public:
}
template<typename U>
LockWeakPtr& operator=(const U& object) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(U const& object)
requires(IsBaseOf<T, U>)
{
m_link = object.template try_make_weak_ptr<U>().release_value_but_fixme_should_propagate_errors().take_link();
return *this;
}
template<typename U>
LockWeakPtr& operator=(const U* object) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(U const* object)
requires(IsBaseOf<T, U>)
{
if (object)
m_link = object->template try_make_weak_ptr<U>().release_value_but_fixme_should_propagate_errors().take_link();
@@ -100,7 +110,8 @@ public:
}
template<typename U>
LockWeakPtr& operator=(LockRefPtr<U> const& object) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(LockRefPtr<U> const& object)
requires(IsBaseOf<T, U>)
{
object.do_while_locked([&](U* obj) {
if (obj)
@@ -112,7 +123,8 @@ public:
}
template<typename U>
LockWeakPtr& operator=(NonnullLockRefPtr<U> const& object) requires(IsBaseOf<T, U>)
LockWeakPtr& operator=(NonnullLockRefPtr<U> const& object)
requires(IsBaseOf<T, U>)
{
object.do_while_locked([&](U* obj) {
if (obj)
@@ -175,7 +187,7 @@ inline ErrorOr<LockWeakPtr<U>> LockWeakable<T>::try_make_weak_ptr() const
// to add a ref (which should fail if the ref count is at 0) so
// that we prevent the destructor and revoke_weak_ptrs from being
// triggered until we're done.
if (!static_cast<const T*>(this)->try_ref())
if (!static_cast<T const*>(this)->try_ref())
return LockWeakPtr<U> {};
} else {
// For non-RefCounted types this means a weak reference can be
@@ -187,14 +199,14 @@ inline ErrorOr<LockWeakPtr<U>> LockWeakable<T>::try_make_weak_ptr() const
// There is a small chance that we create a new WeakLink and throw
// it away because another thread beat us to it. But the window is
// pretty small and the overhead isn't terrible.
m_link.assign_if_null(TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) WeakLink(const_cast<T&>(static_cast<const T&>(*this))))));
m_link.assign_if_null(TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) WeakLink(const_cast<T&>(static_cast<T const&>(*this))))));
}
LockWeakPtr<U> weak_ptr(m_link);
if constexpr (IsBaseOf<AtomicRefCountedBase, T>) {
// Now drop the reference we temporarily added
if (static_cast<const T*>(this)->unref()) {
if (static_cast<T const*>(this)->unref()) {
// We just dropped the last reference, which should have called
// revoke_weak_ptrs, which should have invalidated our weak_ptr
VERIFY(!weak_ptr.strong_ref());
@@ -205,11 +217,11 @@ inline ErrorOr<LockWeakPtr<U>> LockWeakable<T>::try_make_weak_ptr() const
}
template<typename T>
struct Formatter<LockWeakPtr<T>> : Formatter<const T*> {
struct Formatter<LockWeakPtr<T>> : Formatter<T const*> {
ErrorOr<void> format(FormatBuilder& builder, LockWeakPtr<T> const& value)
{
auto ref = value.strong_ref();
return Formatter<const T*>::format(builder, ref.ptr());
return Formatter<T const*>::format(builder, ref.ptr());
}
};

@@ -30,7 +30,7 @@ class WeakLink final : public AtomicRefCounted<WeakLink> {
public:
template<typename T, typename PtrTraits = LockRefPtrTraits<T>>
LockRefPtr<T, PtrTraits> strong_ref() const
requires(IsBaseOf<AtomicRefCountedBase, T>)
requires(IsBaseOf<AtomicRefCountedBase, T>)
{
LockRefPtr<T, PtrTraits> ref;

@@ -47,7 +47,8 @@ public:
const_cast<T&>(object).ref();
}
template<typename U>
ALWAYS_INLINE NonnullLockRefPtr(U const& object) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE NonnullLockRefPtr(U const& object)
requires(IsConvertible<U*, T*>)
: m_bits((FlatPtr) static_cast<T const*>(&object))
{
VERIFY(!(m_bits & 1));
@@ -64,7 +65,8 @@ public:
VERIFY(!(m_bits & 1));
}
template<typename U>
ALWAYS_INLINE NonnullLockRefPtr(NonnullLockRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE NonnullLockRefPtr(NonnullLockRefPtr<U>&& other)
requires(IsConvertible<U*, T*>)
: m_bits((FlatPtr)&other.leak_ref())
{
VERIFY(!(m_bits & 1));
@@ -75,7 +77,8 @@ public:
VERIFY(!(m_bits & 1));
}
template<typename U>
ALWAYS_INLINE NonnullLockRefPtr(NonnullLockRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
ALWAYS_INLINE NonnullLockRefPtr(NonnullLockRefPtr<U> const& other)
requires(IsConvertible<U*, T*>)
: m_bits((FlatPtr)other.add_ref())
{
VERIFY(!(m_bits & 1));
@@ -108,7 +111,8 @@ public:
}
template<typename U>
NonnullLockRefPtr& operator=(NonnullLockRefPtr<U> const& other) requires(IsConvertible<U*, T*>)
NonnullLockRefPtr& operator=(NonnullLockRefPtr<U> const& other)
requires(IsConvertible<U*, T*>)
{
assign(other.add_ref());
return *this;
@@ -122,7 +126,8 @@ public:
}
template<typename U>
NonnullLockRefPtr& operator=(NonnullLockRefPtr<U>&& other) requires(IsConvertible<U*, T*>)
NonnullLockRefPtr& operator=(NonnullLockRefPtr<U>&& other)
requires(IsConvertible<U*, T*>)
{
assign(&other.leak_ref());
return *this;
@@ -202,7 +207,8 @@ public:
}
template<typename U>
void swap(NonnullLockRefPtr<U>& other) requires(IsConvertible<U*, T*>)
void swap(NonnullLockRefPtr<U>& other)
requires(IsConvertible<U*, T*>)
{
// NOTE: swap is not atomic!
U* other_ptr = other.exchange(nullptr);
@@ -318,7 +324,8 @@ struct Formatter<NonnullLockRefPtr<T>> : Formatter<T const*> {
};
template<typename T, typename U>
inline void swap(NonnullLockRefPtr<T>& a, NonnullLockRefPtr<U>& b) requires(IsConvertible<U*, T*>)
inline void swap(NonnullLockRefPtr<T>& a, NonnullLockRefPtr<U>& b)
requires(IsConvertible<U*, T*>)
{
a.swap(b);
}

@@ -32,11 +32,23 @@ private:
ALWAYS_INLINE U const* operator->() const { return &m_value; }
ALWAYS_INLINE U const& operator*() const { return m_value; }
ALWAYS_INLINE U* operator->() requires(!IsConst<U>) { return &m_value; }
ALWAYS_INLINE U& operator*() requires(!IsConst<U>) { return m_value; }
ALWAYS_INLINE U* operator->()
requires(!IsConst<U>)
{
return &m_value;
}
ALWAYS_INLINE U& operator*()
requires(!IsConst<U>)
{
return m_value;
}
ALWAYS_INLINE U const& get() const { return &m_value; }
ALWAYS_INLINE U& get() requires(!IsConst<U>) { return &m_value; }
ALWAYS_INLINE U& get()
requires(!IsConst<U>)
{
return &m_value;
}
private:
U& m_value;

@@ -15,12 +15,12 @@ namespace Kernel::Memory {
template<typename T>
struct TypedMapping {
const T* ptr() const { return reinterpret_cast<const T*>(region->vaddr().offset(offset).as_ptr()); }
T const* ptr() const { return reinterpret_cast<T const*>(region->vaddr().offset(offset).as_ptr()); }
T* ptr() { return reinterpret_cast<T*>(region->vaddr().offset(offset).as_ptr()); }
VirtualAddress base_address() const { return region->vaddr().offset(offset); }
const T* operator->() const { return ptr(); }
T const* operator->() const { return ptr(); }
T* operator->() { return ptr(); }
const T& operator*() const { return *ptr(); }
T const& operator*() const { return *ptr(); }
T& operator*() { return *ptr(); }
OwnPtr<Region> region;
PhysicalAddress paddr;

@@ -62,7 +62,7 @@ public:
}
template<typename T>
void add_random_event(const T& event_data, size_t pool)
void add_random_event(T const& event_data, size_t pool)
{
pool %= pool_count;
if (pool == 0) {
@@ -159,7 +159,7 @@ public:
}
template<typename T>
void add_random_event(const T& event_data)
void add_random_event(T const& event_data)
{
auto& kernel_rng = KernelRng::the();
SpinlockLocker lock(kernel_rng.get_lock());

@@ -54,11 +54,11 @@ ErrorOr<Time> copy_time_from_user(timeval const* tv_user)
}
template<>
ErrorOr<Time> copy_time_from_user<const timeval>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
ErrorOr<Time> copy_time_from_user<timeval const>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<const timespec>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
ErrorOr<Time> copy_time_from_user<timespec const>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
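
Note that "const timeval" and "timeval const" name the same type, so the explicit specializations above are semantically identical before and after; static_assert(IsSame<const timeval, timeval const>) would hold. The same goes for every const/volatile move in this commit: only the spelling changes.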

@@ -52,21 +52,21 @@ void const* memmem(void const* haystack, size_t, void const* needle, size_t);
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, const T* src)
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src, sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_to_user(T* dest, const T* src)
[[nodiscard]] inline ErrorOr<void> copy_to_user(T* dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_to_user(dest, src, sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, Userspace<const T*> src)
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, Userspace<T const*> src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src.unsafe_userspace_ptr(), sizeof(T));
@@ -100,7 +100,7 @@ DEPRECATE_COPY_FROM_USER_TYPE(timeval, copy_time_from_user)
DEPRECATE_COPY_FROM_USER_TYPE(timeval, copy_time_from_user)
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_to_user(Userspace<T*> dest, const T* src)
[[nodiscard]] inline ErrorOr<void> copy_to_user(Userspace<T*> dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_to_user(dest.unsafe_userspace_ptr(), src, sizeof(T));
@@ -114,14 +114,14 @@ template<typename T>
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(void* dest, Userspace<const T*> src, size_t size)
[[nodiscard]] inline ErrorOr<void> copy_from_user(void* dest, Userspace<T const*> src, size_t size)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src.unsafe_userspace_ptr(), size);
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, const T* src, size_t count)
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
@@ -132,7 +132,7 @@ template<typename T>
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_to_user(T* dest, const T* src, size_t count)
[[nodiscard]] inline ErrorOr<void> copy_n_to_user(T* dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
@@ -143,7 +143,7 @@ template<typename T>
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, Userspace<const T*> src, size_t count)
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, Userspace<T const*> src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
@@ -154,7 +154,7 @@ template<typename T>
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> try_copy_n_to_user(Userspace<T*> dest, const T* src, size_t count)
[[nodiscard]] inline ErrorOr<void> try_copy_n_to_user(Userspace<T*> dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
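
The copy_n_* helpers route sizeof(T) * count through Checked<size_t> so the byte count cannot silently wrap before it reaches the raw copy. A minimal usage sketch (buffer and userspace pointer are hypothetical):

    u32 scratch[16];
    // Copies 16 * sizeof(u32) bytes from userspace; returns an error instead
    // of copying a truncated size if the multiplication would overflow.
    TRY(copy_n_from_user(scratch, user_values, 16));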

@@ -11,7 +11,7 @@
namespace Kernel {
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
, IRQHandler(irq)
{

@@ -13,7 +13,7 @@ namespace Kernel {
class NVMeInterruptQueue : public NVMeQueue
, public IRQHandler {
public:
NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMeInterruptQueue() override {};

@@ -10,7 +10,7 @@
#include <Kernel/Storage/NVMe/NVMePollQueue.h>
namespace Kernel {
UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
{
}

@@ -12,7 +12,7 @@ namespace Kernel {
class NVMePollQueue : public NVMeQueue {
public:
NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMePollQueue() override {};

@@ -12,7 +12,7 @@
#include <Kernel/Storage/NVMe/NVMeQueue.h>
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
// Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
RefPtr<Memory::PhysicalPage> rw_dma_page;
@@ -25,7 +25,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
return queue;
}
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: m_current_request(nullptr)
, m_rw_dma_region(move(rw_dma_region))
, m_qid(qid)