mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 08:07:34 +00:00
Kernel: Turn lock ranks into template parameters
This step would ideally not have been necessary (increases amount of refactoring and templates necessary, which in turn increases build times), but it gives us a couple of nice properties: - SpinlockProtected inside Singleton (a very common combination) can now obtain any lock rank just via the template parameter. It was not previously possible to do this with SingletonInstanceCreator magic. - SpinlockProtected's lock rank is now mandatory; this is the majority of cases and allows us to see where we're still missing proper ranks. - The type already informs us what lock rank a lock has, which aids code readability and (possibly, if gdb cooperates) lock mismatch debugging. - The rank of a lock can no longer be dynamic, which is not something we wanted in the first place (or made use of). Locks randomly changing their rank sounds like a disaster waiting to happen. - In some places, we might be able to statically check that locks are taken in the right order (with the right lock rank checking implementation) as rank information is fully statically known. This refactoring further exposes the fact that Mutex has no lock rank capabilities, which is not fixed here.
This commit is contained in:
parent
363cc12146
commit
a6a439243f
94 changed files with 235 additions and 259 deletions
|
@ -41,8 +41,8 @@ public:
|
|||
u32 read32_field(Address address, u32 field);
|
||||
DeviceIdentifier get_device_identifier(Address address) const;
|
||||
|
||||
Spinlock const& scan_lock() const { return m_scan_lock; }
|
||||
RecursiveSpinlock const& access_lock() const { return m_access_lock; }
|
||||
Spinlock<LockRank::None> const& scan_lock() const { return m_scan_lock; }
|
||||
RecursiveSpinlock<LockRank::None> const& access_lock() const { return m_access_lock; }
|
||||
|
||||
ErrorOr<void> add_host_controller_and_enumerate_attached_devices(NonnullOwnPtr<HostController>, Function<void(DeviceIdentifier const&)> callback);
|
||||
|
||||
|
@ -57,8 +57,8 @@ private:
|
|||
Vector<Capability> get_capabilities(Address);
|
||||
Optional<u8> get_capabilities_pointer(Address address);
|
||||
|
||||
mutable RecursiveSpinlock m_access_lock { LockRank::None };
|
||||
mutable Spinlock m_scan_lock { LockRank::None };
|
||||
mutable RecursiveSpinlock<LockRank::None> m_access_lock {};
|
||||
mutable Spinlock<LockRank::None> m_scan_lock {};
|
||||
|
||||
HashMap<u32, NonnullOwnPtr<PCI::HostController>> m_host_controllers;
|
||||
Vector<DeviceIdentifier> m_device_identifiers;
|
||||
|
|
|
@ -29,7 +29,7 @@ private:
|
|||
|
||||
// Note: All read and writes must be done with a spinlock because
|
||||
// Linux says that CPU might deadlock otherwise if access is not serialized.
|
||||
Spinlock m_config_lock { LockRank::None };
|
||||
Spinlock<LockRank::None> m_config_lock {};
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -90,8 +90,6 @@ UNMAP_AFTER_INIT UHCIController::UHCIController(PCI::DeviceIdentifier const& pci
|
|||
: PCI::Device(pci_device_identifier.address())
|
||||
, IRQHandler(pci_device_identifier.interrupt_line().value())
|
||||
, m_registers_io_window(move(registers_io_window))
|
||||
, m_async_lock(LockRank::None)
|
||||
, m_schedule_lock(LockRank::None)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -100,8 +100,8 @@ private:
|
|||
|
||||
NonnullOwnPtr<IOWindow> m_registers_io_window;
|
||||
|
||||
Spinlock m_async_lock;
|
||||
Spinlock m_schedule_lock;
|
||||
Spinlock<LockRank::None> m_async_lock {};
|
||||
Spinlock<LockRank::None> m_schedule_lock {};
|
||||
|
||||
OwnPtr<UHCIRootHub> m_root_hub;
|
||||
OwnPtr<UHCIDescriptorPool<QueueHead>> m_queue_head_pool;
|
||||
|
|
|
@ -70,7 +70,6 @@ private:
|
|||
UHCIDescriptorPool(NonnullOwnPtr<Memory::Region> pool_memory_block, StringView name)
|
||||
: m_pool_name(name)
|
||||
, m_pool_region(move(pool_memory_block))
|
||||
, m_pool_lock(LockRank::None)
|
||||
{
|
||||
// Go through the number of descriptors to create in the pool, and create a virtual/physical address mapping
|
||||
for (size_t i = 0; i < PAGE_SIZE / sizeof(T); i++) {
|
||||
|
@ -84,7 +83,7 @@ private:
|
|||
StringView m_pool_name; // Name of this pool
|
||||
NonnullOwnPtr<Memory::Region> m_pool_region; // Memory region where descriptors actually reside
|
||||
Stack<T*, PAGE_SIZE / sizeof(T)> m_free_descriptor_stack; // Stack of currently free descriptor pointers
|
||||
Spinlock m_pool_lock;
|
||||
Spinlock<LockRank::None> m_pool_lock;
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ public:
|
|||
QueueChain pop_used_buffer_chain(size_t& used);
|
||||
void discard_used_buffers();
|
||||
|
||||
Spinlock& lock() { return m_lock; }
|
||||
Spinlock<LockRank::None>& lock() { return m_lock; }
|
||||
|
||||
bool should_notify() const;
|
||||
|
||||
|
@ -96,7 +96,7 @@ private:
|
|||
QueueDriver* m_driver { nullptr };
|
||||
QueueDevice* m_device { nullptr };
|
||||
NonnullOwnPtr<Memory::Region> m_queue_region;
|
||||
Spinlock m_lock { LockRank::None };
|
||||
Spinlock<LockRank::None> m_lock {};
|
||||
|
||||
friend class QueueChain;
|
||||
};
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue