
Kernel: Rename ScopedSpinlock => SpinlockLocker

This matches MutexLocker, and doesn't sound like it's a lock itself.
Andreas Kling 2021-08-22 01:49:22 +02:00
parent 55adace359
commit c922a7da09
78 changed files with 365 additions and 366 deletions
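The rename is purely mechanical: every RAII guard spelled ScopedSpinlock becomes SpinlockLocker, matching the existing MutexLocker naming. A minimal call-site sketch of what the change means in practice (the class and lock names below are illustrative, not taken from this diff):

    Spinlock<u8> m_lock;

    void Example::do_work()
    {
        // Before this commit: ScopedSpinlock lock(m_lock);
        SpinlockLocker lock(m_lock); // acquires m_lock; releases it when `lock` goes out of scope
        // ... critical section ...
    }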

@@ -501,7 +501,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
     // is a chance a context switch may happen while we're trying
     // to get it. It also won't be entirely accurate and merely
     // reflect the status at the last context switch.
-    ScopedSpinlock lock(g_scheduler_lock);
+    SpinlockLocker lock(g_scheduler_lock);
     if (&thread == Processor::current_thread()) {
         VERIFY(thread.state() == Thread::Running);
         // Leave the scheduler lock. If we trigger page faults we may

@@ -117,7 +117,7 @@ VirtualAddress MMIOAccess::get_device_configuration_space(Address address)
 u8 MMIOAccess::read8_field(Address address, u32 field)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field <= 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 8-bit field {:#08x} for {}", field, address);
     return *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff)));
@@ -125,7 +125,7 @@ u8 MMIOAccess::read8_field(Address address, u32 field)
 u16 MMIOAccess::read16_field(Address address, u32 field)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field < 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 16-bit field {:#08x} for {}", field, address);
     u16 data = 0;
@@ -135,7 +135,7 @@ u16 MMIOAccess::read16_field(Address address, u32 field)
 u32 MMIOAccess::read32_field(Address address, u32 field)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field <= 0xffc);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 32-bit field {:#08x} for {}", field, address);
     u32 data = 0;
@@ -145,21 +145,21 @@ u32 MMIOAccess::read32_field(Address address, u32 field)
 void MMIOAccess::write8_field(Address address, u32 field, u8 value)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field <= 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 8-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff))) = value;
 }
 void MMIOAccess::write16_field(Address address, u32 field, u16 value)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field < 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 16-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     ByteReader::store<u16>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);
 }
 void MMIOAccess::write32_field(Address address, u32 field, u32 value)
 {
-    ScopedSpinlock lock(m_access_lock);
+    SpinlockLocker lock(m_access_lock);
     VERIFY(field <= 0xffc);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 32-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     ByteReader::store<u32>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);

@@ -57,7 +57,7 @@ KResultOr<size_t> SysFSUSBDeviceInformation::read_bytes(off_t offset, size_t cou
 KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     // Note: if the parent directory is null, it means something bad happened as this should not happen for the USB directory.
     VERIFY(m_parent_directory);
     callback({ ".", { fsid, component_index() }, 0 });
@@ -72,7 +72,7 @@ KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function<bool
 RefPtr<SysFSComponent> SysFSUSBBusDirectory::lookup(StringView name)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     for (auto& device_node : m_device_nodes) {
         if (device_node.name() == name) {
             return device_node;
@@ -93,7 +93,7 @@ RefPtr<SysFSUSBDeviceInformation> SysFSUSBBusDirectory::device_node_for(USB::Dev
 void SysFSUSBBusDirectory::plug(USB::Device& new_device)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto device_node = device_node_for(new_device);
     VERIFY(!device_node);
     m_device_nodes.append(SysFSUSBDeviceInformation::create(new_device));
@@ -101,7 +101,7 @@ void SysFSUSBBusDirectory::plug(USB::Device& new_device)
 void SysFSUSBBusDirectory::unplug(USB::Device& deleted_device)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto device_node = device_node_for(deleted_device);
     VERIFY(device_node);
     device_node->m_list_node.remove();

@@ -64,9 +64,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index)
     dbgln_if(VIRTIO_DEBUG, "VirtIOConsole: Handle queue update {}", queue_index);
     if (queue_index == CONTROL_RECEIVEQ) {
-        ScopedSpinlock ringbuffer_lock(m_control_receive_buffer->lock());
+        SpinlockLocker ringbuffer_lock(m_control_receive_buffer->lock());
         auto& queue = get_queue(CONTROL_RECEIVEQ);
-        ScopedSpinlock queue_lock(queue.lock());
+        SpinlockLocker queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
@@ -81,9 +81,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index)
             popped_chain = queue.pop_used_buffer_chain(used);
         }
     } else if (queue_index == CONTROL_TRANSMITQ) {
-        ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock());
+        SpinlockLocker ringbuffer_lock(m_control_transmit_buffer->lock());
         auto& queue = get_queue(CONTROL_TRANSMITQ);
-        ScopedSpinlock queue_lock(queue.lock());
+        SpinlockLocker queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
         auto number_of_messages = 0;
@@ -112,7 +112,7 @@ void VirtIOConsole::setup_multiport()
     m_control_transmit_buffer = make<Memory::RingBuffer>("VirtIOConsole control transmit queue", CONTROL_BUFFER_SIZE);
     auto& queue = get_queue(CONTROL_RECEIVEQ);
-    ScopedSpinlock queue_lock(queue.lock());
+    SpinlockLocker queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
     auto offset = 0ul;
@@ -184,7 +184,7 @@ void VirtIOConsole::process_control_message(ControlMessage message)
 }
 void VirtIOConsole::write_control_message(ControlMessage message)
 {
-    ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock());
+    SpinlockLocker ringbuffer_lock(m_control_transmit_buffer->lock());
     PhysicalAddress start_of_chunk;
     size_t length_of_chunk;
@@ -197,7 +197,7 @@ void VirtIOConsole::write_control_message(ControlMessage message)
     }
     auto& queue = get_queue(CONTROL_TRANSMITQ);
-    ScopedSpinlock queue_lock(queue.lock());
+    SpinlockLocker queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
     bool did_add_buffer = chain.add_buffer_to_chain(start_of_chunk, length_of_chunk, BufferType::DeviceReadable);

@@ -27,7 +27,7 @@ VirtIOConsolePort::VirtIOConsolePort(unsigned port, VirtIOConsole& console)
 void VirtIOConsolePort::init_receive_buffer()
 {
     auto& queue = m_console.get_queue(m_receive_queue);
-    ScopedSpinlock queue_lock(queue.lock());
+    SpinlockLocker queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
     auto buffer_start = m_receive_buffer->start_of_region();
@@ -42,11 +42,11 @@ void VirtIOConsolePort::handle_queue_update(Badge<VirtIOConsole>, u16 queue_inde
     VERIFY(queue_index == m_transmit_queue || queue_index == m_receive_queue);
     if (queue_index == m_receive_queue) {
         auto& queue = m_console.get_queue(m_receive_queue);
-        ScopedSpinlock queue_lock(queue.lock());
+        SpinlockLocker queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
-        ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock());
+        SpinlockLocker ringbuffer_lock(m_receive_buffer->lock());
         auto used_space = m_receive_buffer->reserve_space(used).value();
         auto remaining_space = m_receive_buffer->bytes_till_end();
@@ -65,9 +65,9 @@ void VirtIOConsolePort::handle_queue_update(Badge<VirtIOConsole>, u16 queue_inde
         evaluate_block_conditions();
     } else {
-        ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock());
+        SpinlockLocker ringbuffer_lock(m_transmit_buffer->lock());
         auto& queue = m_console.get_queue(m_transmit_queue);
-        ScopedSpinlock queue_lock(queue.lock());
+        SpinlockLocker queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
         do {
@@ -92,7 +92,7 @@ KResultOr<size_t> VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern
     if (!size)
         return 0;
-    ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock());
+    SpinlockLocker ringbuffer_lock(m_receive_buffer->lock());
     if (!can_read(desc, size))
         return EAGAIN;
@@ -102,7 +102,7 @@ KResultOr<size_t> VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern
     if (m_receive_buffer_exhausted && m_receive_buffer->used_bytes() == 0) {
         auto& queue = m_console.get_queue(m_receive_queue);
-        ScopedSpinlock queue_lock(queue.lock());
+        SpinlockLocker queue_lock(queue.lock());
         VirtIOQueueChain new_chain(queue);
         new_chain.add_buffer_to_chain(m_receive_buffer->start_of_region(), RINGBUFFER_SIZE, BufferType::DeviceWritable);
         m_console.supply_chain_and_notify(m_receive_queue, new_chain);
@@ -122,9 +122,9 @@ KResultOr<size_t> VirtIOConsolePort::write(FileDescription& desc, u64, const Use
     if (!size)
         return 0;
-    ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock());
+    SpinlockLocker ringbuffer_lock(m_transmit_buffer->lock());
     auto& queue = m_console.get_queue(m_transmit_queue);
-    ScopedSpinlock queue_lock(queue.lock());
+    SpinlockLocker queue_lock(queue.lock());
     if (!can_write(desc, size))
         return EAGAIN;

@@ -43,13 +43,13 @@ VirtIOQueue::~VirtIOQueue()
 void VirtIOQueue::enable_interrupts()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     m_driver->flags = 0;
 }
 void VirtIOQueue::disable_interrupts()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     m_driver->flags = 1;
 }

@@ -44,7 +44,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index)
     size_t available_entropy = 0, used;
     auto& queue = get_queue(REQUESTQ);
     {
-        ScopedSpinlock lock(queue.lock());
+        SpinlockLocker lock(queue.lock());
         auto chain = queue.pop_used_buffer_chain(used);
         if (chain.is_empty())
             return;
@@ -64,7 +64,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index)
 void VirtIORNG::request_entropy_from_host()
 {
     auto& queue = get_queue(REQUESTQ);
-    ScopedSpinlock lock(queue.lock());
+    SpinlockLocker lock(queue.lock());
     VirtIOQueueChain chain(queue);
     chain.add_buffer_to_chain(m_entropy_buffer->physical_page(0)->paddr(), PAGE_SIZE, BufferType::DeviceWritable);
     supply_chain_and_notify(REQUESTQ, chain);

@@ -67,7 +67,7 @@ Kernel::KResultOr<size_t> ConsoleDevice::write(FileDescription&, u64, const Kern
 void ConsoleDevice::put_char(char ch)
 {
-    Kernel::ScopedSpinlock lock(g_console_lock);
+    Kernel::SpinlockLocker lock(g_console_lock);
 #ifdef CONSOLE_OUT_TO_BOCHS_DEBUG_PORT
     IO::out8(IO::BOCHS_DEBUG_PORT, ch);
 #endif

@@ -321,7 +321,7 @@ ByteBuffer CoreDump::create_notes_segment_data() const
 KResult CoreDump::write()
 {
-    ScopedSpinlock lock(m_process->address_space().get_lock());
+    SpinlockLocker lock(m_process->address_space().get_lock());
     ProcessPagingScope scope(m_process);
     ByteBuffer notes_segment = create_notes_segment_data();

@@ -18,7 +18,7 @@ AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
 AsyncDeviceRequest::~AsyncDeviceRequest()
 {
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         VERIFY(is_completed_result(m_result));
         VERIFY(m_sub_requests_pending.is_empty());
     }
@@ -63,7 +63,7 @@ auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult
 auto AsyncDeviceRequest::get_request_result() const -> RequestResult
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     return m_result;
 }
@@ -74,7 +74,7 @@ void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_r
     VERIFY(sub_request->m_parent_request == nullptr);
     sub_request->m_parent_request = this;
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     VERIFY(!is_completed_result(m_result));
     m_sub_requests_pending.append(sub_request);
     if (m_result == Started)
@@ -85,7 +85,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
 {
     bool all_completed;
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         VERIFY(m_result == Started);
         if (m_sub_requests_pending.contains(sub_request)) {
@@ -131,7 +131,7 @@ void AsyncDeviceRequest::complete(RequestResult result)
     VERIFY(result == Success || result == Failure || result == MemoryFault);
     ScopedCritical critical;
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         VERIFY(m_result == Started);
         m_result = result;
     }

@@ -61,7 +61,7 @@ public:
     [[nodiscard]] RequestWaitResult wait(Time* = nullptr);
-    void do_start(ScopedSpinlock<Spinlock<u8>>&& requests_lock)
+    void do_start(SpinlockLocker<Spinlock<u8>>&& requests_lock)
     {
         if (is_completed_result(m_result))
             return;

@@ -62,7 +62,7 @@ String Device::absolute_path(const FileDescription&) const
 void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest& completed_request)
 {
-    ScopedSpinlock lock(m_requests_lock);
+    SpinlockLocker lock(m_requests_lock);
     VERIFY(!m_requests.is_empty());
     VERIFY(m_requests.first().ptr() == &completed_request);
     m_requests.remove(m_requests.begin());

@@ -52,7 +52,7 @@ public:
     NonnullRefPtr<AsyncRequestType> make_request(Args&&... args)
     {
         auto request = adopt_ref(*new AsyncRequestType(*this, forward<Args>(args)...));
-        ScopedSpinlock lock(m_requests_lock);
+        SpinlockLocker lock(m_requests_lock);
         bool was_empty = m_requests.is_empty();
         m_requests.append(request);
         if (was_empty)

@@ -35,7 +35,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
 {
     u8 configuration;
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         // Disable devices
         do_wait_then_write(I8042_STATUS, 0xad);
         do_wait_then_write(I8042_STATUS, 0xa7); // ignored if it doesn't exist
@@ -103,7 +103,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
         m_first_port_available = false;
         configuration &= ~1;
         configuration |= 1 << 4;
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         do_wait_then_write(I8042_STATUS, 0x60);
         do_wait_then_write(I8042_BUFFER, configuration);
     }
@@ -116,7 +116,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
         dbgln("I8042: Mouse device failed to initialize, disable");
         m_second_port_available = false;
         configuration |= 1 << 5;
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         do_wait_then_write(I8042_STATUS, 0x60);
         do_wait_then_write(I8042_BUFFER, configuration);
     }

@@ -53,36 +53,36 @@ public:
     bool reset_device(HIDDevice::Type device)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return do_reset_device(device);
     }
     u8 send_command(HIDDevice::Type device, u8 command)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return do_send_command(device, command);
     }
     u8 send_command(HIDDevice::Type device, u8 command, u8 data)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return do_send_command(device, command, data);
     }
     u8 read_from_device(HIDDevice::Type device)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return do_read_from_device(device);
     }
     void wait_then_write(u8 port, u8 data)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         do_wait_then_write(port, data);
     }
     u8 wait_then_read(u8 port)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return do_wait_then_read(port);
     }

@@ -251,7 +251,7 @@ void KeyboardDevice::key_state_changed(u8 scan_code, bool pressed)
     HIDManagement::the().m_client->on_key_pressed(event);
     {
-        ScopedSpinlock lock(m_queue_lock);
+        SpinlockLocker lock(m_queue_lock);
         m_queue.enqueue(event);
     }
@@ -281,7 +281,7 @@ bool KeyboardDevice::can_read(const FileDescription&, size_t) const
 KResultOr<size_t> KeyboardDevice::read(FileDescription&, u64, UserOrKernelBuffer& buffer, size_t size)
 {
     size_t nread = 0;
-    ScopedSpinlock lock(m_queue_lock);
+    SpinlockLocker lock(m_queue_lock);
     while (nread < size) {
         if (m_queue.is_empty())
             break;

@@ -20,7 +20,7 @@ MouseDevice::~MouseDevice()
 bool MouseDevice::can_read(const FileDescription&, size_t) const
 {
-    ScopedSpinlock lock(m_queue_lock);
+    SpinlockLocker lock(m_queue_lock);
     return !m_queue.is_empty();
 }
@@ -29,7 +29,7 @@ KResultOr<size_t> MouseDevice::read(FileDescription&, u64, UserOrKernelBuffer& b
     VERIFY(size > 0);
     size_t nread = 0;
     size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
-    ScopedSpinlock lock(m_queue_lock);
+    SpinlockLocker lock(m_queue_lock);
     while (!m_queue.is_empty() && remaining_space_in_buffer) {
         auto packet = m_queue.dequeue();
         lock.unlock();

@@ -60,7 +60,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
     m_entropy_source.add_random_event(m_data.dword);
     {
-        ScopedSpinlock lock(m_queue_lock);
+        SpinlockLocker lock(m_queue_lock);
         m_queue.enqueue(parse_data_packet(m_data));
     }
     evaluate_block_conditions();

@@ -36,7 +36,7 @@ void VMWareMouseDevice::irq_handle_byte_read(u8)
     if (mouse_packet.has_value()) {
         m_entropy_source.add_random_event(mouse_packet.value());
         {
-            ScopedSpinlock lock(m_queue_lock);
+            SpinlockLocker lock(m_queue_lock);
             m_queue.enqueue(mouse_packet.value());
         }
         evaluate_block_conditions();

@@ -84,7 +84,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> a
         return ENXIO; // This proc hasn't opened the kcov dev yet
     auto kcov_instance = maybe_kcov_instance.value();
-    ScopedSpinlock lock(kcov_instance->lock);
+    SpinlockLocker lock(kcov_instance->lock);
     switch (request) {
     case KCOV_SETBUFSIZE: {
         if (kcov_instance->state >= KCOVInstance::TRACING) {

@@ -59,7 +59,7 @@ KResultOr<size_t> SerialDevice::read(FileDescription&, u64, UserOrKernelBuffer&
     if (!size)
         return 0;
-    ScopedSpinlock lock(m_serial_lock);
+    SpinlockLocker lock(m_serial_lock);
     if (!(get_line_status() & DataReady))
         return 0;
@@ -80,7 +80,7 @@ KResultOr<size_t> SerialDevice::write(FileDescription& description, u64, const U
     if (!size)
         return 0;
-    ScopedSpinlock lock(m_serial_lock);
+    SpinlockLocker lock(m_serial_lock);
     if (!can_write(description, size))
         return EAGAIN;

@@ -34,7 +34,7 @@ public:
     void unblock()
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         do_unblock([&](auto& b, void* data, bool&) {
             VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
             auto& blocker = static_cast<Thread::FileBlocker&>(b);

@@ -412,7 +412,7 @@ Plan9FS::ReceiveCompletion::~ReceiveCompletion()
 bool Plan9FS::Blocker::unblock(u16 tag)
 {
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         if (m_did_unblock)
             return false;
         m_did_unblock = true;
@@ -428,7 +428,7 @@ bool Plan9FS::Blocker::unblock(u16 tag)
 void Plan9FS::Blocker::not_blocking(bool)
 {
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         if (m_did_unblock)
             return;
     }
@@ -438,7 +438,7 @@ void Plan9FS::Blocker::not_blocking(bool)
 bool Plan9FS::Blocker::is_completed() const
 {
-    ScopedSpinlock lock(m_completion->lock);
+    SpinlockLocker lock(m_completion->lock);
     return m_completion->completed;
 }
@@ -470,7 +470,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_all()
 void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker)
 {
     if (m_fs.is_complete(*blocker.completion())) {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         blocker.unblock(blocker.completion()->tag);
     }
 }
@@ -576,7 +576,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
     auto optional_completion = m_completions.get(header.tag);
     if (optional_completion.has_value()) {
         auto completion = optional_completion.value();
-        ScopedSpinlock lock(completion->lock);
+        SpinlockLocker lock(completion->lock);
         completion->result = KSuccess;
         completion->message = adopt_own_if_nonnull(new (nothrow) Message { buffer.release_nonnull() });
         completion->completed = true;
@@ -666,7 +666,7 @@ void Plan9FS::thread_main()
 void Plan9FS::ensure_thread()
 {
-    ScopedSpinlock lock(m_thread_lock);
+    SpinlockLocker lock(m_thread_lock);
     if (!m_thread_running.exchange(true, AK::MemoryOrder::memory_order_acq_rel)) {
         Process::create_kernel_process(m_thread, "Plan9FS", [&]() {
             thread_main();

@@ -14,7 +14,7 @@ static InodeIndex s_next_inode_index { 0 };
 static size_t allocate_inode_index()
 {
-    ScopedSpinlock lock(s_index_lock);
+    SpinlockLocker lock(s_index_lock);
     s_next_inode_index = s_next_inode_index.value() + 1;
     VERIFY(s_next_inode_index > 0);
     return s_next_inode_index.value();

@@ -86,7 +86,7 @@ class VirtualRangeAllocator;
 template<typename BaseType>
 class Spinlock;
 template<typename LockType>
-class ScopedSpinlock;
+class SpinlockLocker;
 template<typename T>
 class KResultOr;

@@ -39,7 +39,7 @@ bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
 u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
 {
     is_empty_target = false;
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count);
@@ -75,7 +75,7 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& ge
         lock.unlock();
         did_requeue = blockers_to_requeue.size();
-        ScopedSpinlock target_lock(target_futex_queue->m_lock);
+        SpinlockLocker target_lock(target_futex_queue->m_lock);
         // Now that we have the lock of the target, append the blockers
         // and notify them that they completed the move
         for (auto& info : blockers_to_requeue) {
@@ -100,7 +100,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_emp
         is_empty = false;
         return 0; // should we assert instead?
     }
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
     u32 did_wake = 0;
     do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
@@ -123,7 +123,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_emp
 u32 FutexQueue::wake_all(bool& is_empty)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
     u32 did_wake = 0;
     do_unblock([&](Thread::Blocker& b, void* data, bool&) {
@@ -148,7 +148,7 @@ bool FutexQueue::is_empty_and_no_imminent_waits_locked()
 bool FutexQueue::queue_imminent_wait()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (m_was_removed)
         return false;
     m_imminent_waits++;
@@ -157,7 +157,7 @@ bool FutexQueue::queue_imminent_wait()
 bool FutexQueue::try_remove()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (m_was_removed)
         return false;
     if (!is_empty_and_no_imminent_waits_locked())
@@ -168,7 +168,7 @@ bool FutexQueue::try_remove()
 void FutexQueue::did_remove()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     VERIFY(m_was_removed);
     VERIFY(is_empty_and_no_imminent_waits_locked());
 }

@@ -37,7 +37,7 @@ public:
     bool is_empty_and_no_imminent_waits()
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return is_empty_and_no_imminent_waits_locked();
     }
     bool is_empty_and_no_imminent_waits_locked();

@@ -474,7 +474,7 @@ private:
             process_object.add("kernel", process.is_kernel_process());
             auto thread_array = process_object.add_array("threads");
             process.for_each_thread([&](const Thread& thread) {
-                ScopedSpinlock locker(thread.get_lock());
+                SpinlockLocker locker(thread.get_lock());
                 auto thread_object = thread_array.add_object();
 #if LOCK_DEBUG
                 thread_object.add("lock_count", thread.lock_count());
@@ -500,7 +500,7 @@ private:
         });
     };
-    ScopedSpinlock lock(g_scheduler_lock);
+    SpinlockLocker lock(g_scheduler_lock);
     {
         {
             auto array = json.add_array("processes");

@@ -208,7 +208,7 @@ bool BochsGraphicsAdapter::set_y_offset(size_t output_port_index, size_t y_offse
 void BochsGraphicsAdapter::enable_consoles()
 {
-    ScopedSpinlock lock(m_console_mode_switch_lock);
+    SpinlockLocker lock(m_console_mode_switch_lock);
     VERIFY(m_framebuffer_console);
     m_console_enabled = true;
     m_registers->bochs_regs.y_offset = 0;
@@ -218,7 +218,7 @@ void BochsGraphicsAdapter::enable_consoles()
 }
 void BochsGraphicsAdapter::disable_consoles()
 {
-    ScopedSpinlock lock(m_console_mode_switch_lock);
+    SpinlockLocker lock(m_console_mode_switch_lock);
     VERIFY(m_framebuffer_console);
     VERIFY(m_framebuffer_device);
     m_console_enabled = false;

@@ -224,7 +224,7 @@ void GenericFramebufferConsole::show_cursor()
 void GenericFramebufferConsole::clear(size_t x, size_t y, size_t length)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (x == 0 && length == max_column()) {
         // if we need to clear the entire row, just clean it with quick memset :)
         auto* offset_in_framebuffer = (u32*)&framebuffer_data()[x * sizeof(u32) * 8 + y * 8 * sizeof(u32) * width()];
@@ -264,19 +264,19 @@ void GenericFramebufferConsole::clear_glyph(size_t x, size_t y)
 void GenericFramebufferConsole::enable()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     memset(framebuffer_data(), 0, height() * width() * sizeof(u32));
     m_enabled.store(true);
 }
 void GenericFramebufferConsole::disable()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     m_enabled.store(false);
 }
 void GenericFramebufferConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (!m_enabled.load())
         return;

@@ -87,8 +87,8 @@ enum VGAColor : u8 {
 void TextModeConsole::set_cursor(size_t x, size_t y)
 {
-    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock());
+    SpinlockLocker lock(m_vga_lock);
     m_cursor_x = x;
     m_cursor_y = y;
     u16 value = m_current_vga_start_address + (y * width() + x);
@@ -99,22 +99,22 @@ void TextModeConsole::set_cursor(size_t x, size_t y)
 }
 void TextModeConsole::hide_cursor()
 {
-    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock());
+    SpinlockLocker lock(m_vga_lock);
     IO::out8(0x3D4, 0xA);
     IO::out8(0x3D5, 0x20);
 }
 void TextModeConsole::show_cursor()
 {
-    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock());
+    SpinlockLocker lock(m_vga_lock);
     IO::out8(0x3D4, 0xA);
     IO::out8(0x3D5, 0x20);
 }
 void TextModeConsole::clear(size_t x, size_t y, size_t length)
 {
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker lock(m_vga_lock);
     auto* buf = (u16*)(m_current_vga_window + (x * 2) + (y * width() * 2));
     for (size_t index = 0; index < length; index++) {
         buf[index] = 0x0720;
@@ -127,12 +127,12 @@ void TextModeConsole::write(size_t x, size_t y, char ch, bool critical)
 void TextModeConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical)
 {
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker lock(m_vga_lock);
     // If we are in critical printing mode, we need to handle new lines here
     // because there's no other responsible object to do that in the print call path
     if (critical && (ch == '\r' || ch == '\n')) {
         // Disable hardware VGA cursor
-        ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
+        SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock());
         IO::out8(0x3D4, 0xA);
         IO::out8(0x3D5, 0x20);
@@ -162,7 +162,7 @@ void TextModeConsole::clear_vga_row(u16 row)
 void TextModeConsole::set_vga_start_row(u16 row)
 {
-    ScopedSpinlock lock(m_vga_lock);
+    SpinlockLocker lock(m_vga_lock);
     m_vga_start_row = row;
     m_current_vga_start_address = row * width();
     m_current_vga_window = m_current_vga_window + row * width() * bytes_per_base_glyph();

@@ -27,7 +27,7 @@ NonnullRefPtr<FramebufferDevice> FramebufferDevice::create(const GraphicsDevice&
 KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
-    ScopedSpinlock lock(m_activation_lock);
+    SpinlockLocker lock(m_activation_lock);
     REQUIRE_PROMISE(video);
     if (!shared)
         return ENODEV;
@@ -80,7 +80,7 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
 void FramebufferDevice::deactivate_writes()
 {
-    ScopedSpinlock lock(m_activation_lock);
+    SpinlockLocker lock(m_activation_lock);
     if (!m_userspace_framebuffer_region)
         return;
     memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_size_in_bytes()));
@@ -91,7 +91,7 @@ void FramebufferDevice::deactivate_writes()
 }
 void FramebufferDevice::activate_writes()
 {
-    ScopedSpinlock lock(m_activation_lock);
+    SpinlockLocker lock(m_activation_lock);
     if (!m_userspace_framebuffer_region || !m_real_framebuffer_vmobject)
         return;
     // restore the image we had in the void area

@@ -192,7 +192,7 @@ IntelNativeGraphicsAdapter::IntelNativeGraphicsAdapter(PCI::Address address)
     m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::ReadWrite);
     PCI::enable_bus_mastering(address);
     {
-        ScopedSpinlock control_lock(m_control_lock);
+        SpinlockLocker control_lock(m_control_lock);
         set_gmbus_default_rate();
         set_gmbus_pin_pair(GMBusPinPair::DedicatedAnalog);
     }
@@ -277,7 +277,7 @@ void IntelNativeGraphicsAdapter::write_to_register(IntelGraphics::RegisterIndex
 {
     VERIFY(m_control_lock.is_locked());
     VERIFY(m_registers_region);
-    ScopedSpinlock lock(m_registers_lock);
+    SpinlockLocker lock(m_registers_lock);
     dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Write to {} value of {:x}", pci_address(), convert_register_index_to_string(index), value);
     auto* reg = (volatile u32*)m_registers_region->vaddr().offset(index).as_ptr();
     *reg = value;
@@ -286,7 +286,7 @@ u32 IntelNativeGraphicsAdapter::read_from_register(IntelGraphics::RegisterIndex
 {
     VERIFY(m_control_lock.is_locked());
     VERIFY(m_registers_region);
-    ScopedSpinlock lock(m_registers_lock);
+    SpinlockLocker lock(m_registers_lock);
     auto* reg = (volatile u32*)m_registers_region->vaddr().offset(index).as_ptr();
     u32 value = *reg;
     dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Read from {} value of {:x}", pci_address(), convert_register_index_to_string(index), value);
@@ -373,7 +373,7 @@ void IntelNativeGraphicsAdapter::gmbus_read(unsigned address, u8* buf, size_t le
 void IntelNativeGraphicsAdapter::gmbus_read_edid()
 {
-    ScopedSpinlock control_lock(m_control_lock);
+    SpinlockLocker control_lock(m_control_lock);
     gmbus_write(DDC2_I2C_ADDRESS, 0);
     gmbus_read(DDC2_I2C_ADDRESS, (u8*)&m_crt_edid, sizeof(Graphics::VideoInfoBlock));
 }
@@ -409,8 +409,8 @@ void IntelNativeGraphicsAdapter::enable_output(PhysicalAddress fb_address, size_
 bool IntelNativeGraphicsAdapter::set_crt_resolution(size_t width, size_t height)
 {
-    ScopedSpinlock control_lock(m_control_lock);
-    ScopedSpinlock modeset_lock(m_modeset_lock);
+    SpinlockLocker control_lock(m_control_lock);
+    SpinlockLocker modeset_lock(m_modeset_lock);
     if (!is_resolution_valid(width, height)) {
         return false;
     }

@@ -81,7 +81,7 @@ void GPU::handle_queue_update(u16 queue_index)
     VERIFY(queue_index == CONTROLQ);
     auto& queue = get_queue(CONTROLQ);
-    ScopedSpinlock queue_lock(queue.lock());
+    SpinlockLocker queue_lock(queue.lock());
     queue.discard_used_buffers();
     m_outstanding_request.wake_all();
 }
@@ -242,7 +242,7 @@ void GPU::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t re
     VERIFY(m_outstanding_request.is_empty());
     auto& queue = get_queue(CONTROLQ);
     {
-        ScopedSpinlock lock(queue.lock());
+        SpinlockLocker lock(queue.lock());
         VirtIOQueueChain chain { queue };
         chain.add_buffer_to_chain(buffer_start, request_size, BufferType::DeviceReadable);
         chain.add_buffer_to_chain(buffer_start.offset(request_size), response_size, BufferType::DeviceWritable);

@@ -136,7 +136,7 @@ struct KmallocGlobalHeap {
             // onto the region. Unless we already used the backup
             // memory, in which case we want to use the region as the
            // new backup.
-            ScopedSpinlock lock(s_lock);
+            SpinlockLocker lock(s_lock);
             if (!m_global_heap.m_backup_memory) {
                 if constexpr (KMALLOC_DEBUG) {
                     dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size());
@@ -235,7 +235,7 @@ void* kmalloc_eternal(size_t size)
     size = round_up_to_power_of_two(size, sizeof(void*));
-    ScopedSpinlock lock(s_lock);
+    SpinlockLocker lock(s_lock);
     void* ptr = s_next_eternal_ptr;
     s_next_eternal_ptr += size;
     VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
@@ -246,7 +246,7 @@ void* kmalloc_eternal(size_t size)
 void* kmalloc(size_t size)
 {
     kmalloc_verify_nospinlock_held();
-    ScopedSpinlock lock(s_lock);
+    SpinlockLocker lock(s_lock);
     ++g_kmalloc_call_count;
     if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
@@ -277,7 +277,7 @@ void kfree(void* ptr)
         return;
     kmalloc_verify_nospinlock_held();
-    ScopedSpinlock lock(s_lock);
+    SpinlockLocker lock(s_lock);
     ++g_kfree_call_count;
     ++g_nested_kfree_calls;
@@ -375,7 +375,7 @@ void operator delete[](void* ptr, size_t size) noexcept
 void get_kmalloc_stats(kmalloc_stats& stats)
 {
-    ScopedSpinlock lock(s_lock);
+    SpinlockLocker lock(s_lock);
     stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
     stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
     stats.bytes_eternal = g_kmalloc_bytes_eternal;

@@ -21,7 +21,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
     VERIFY(mode != Mode::Unlocked);
     auto current_thread = Thread::current();
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     bool did_block = false;
     Mode current_mode = m_mode;
     switch (current_mode) {
@@ -145,7 +145,7 @@ void Mutex::unlock()
     // and also from within critical sections!
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     Mode current_mode = m_mode;
     if constexpr (LOCK_TRACE_DEBUG) {
         if (current_mode == Mode::Shared)
@@ -196,7 +196,7 @@ void Mutex::unlock()
     }
 }
-void Mutex::block(Thread& current_thread, Mode mode, ScopedSpinlock<Spinlock<u8>>& lock, u32 requested_locks)
+void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock<u8>>& lock, u32 requested_locks)
 {
     auto& blocked_thread_list = thread_list_for_mode(mode);
     VERIFY(!blocked_thread_list.contains(current_thread));
@@ -255,7 +255,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
     // and also from within critical sections!
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto current_mode = m_mode;
     switch (current_mode) {
     case Mode::Exclusive: {
@@ -319,7 +319,7 @@ void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] LockLocatio
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
     bool did_block = false;
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     switch (mode) {
     case Mode::Exclusive: {
         auto previous_mode = m_mode;

@@ -39,12 +39,12 @@ public:
     [[nodiscard]] Mode force_unlock_if_locked(u32&);
     [[nodiscard]] bool is_locked() const
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         return m_mode != Mode::Unlocked;
     }
     [[nodiscard]] bool own_lock() const
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         if (m_mode == Mode::Exclusive)
             return m_holder == Thread::current();
         if (m_mode == Mode::Shared)
@@ -77,7 +77,7 @@ private:
         return mode == Mode::Exclusive ? m_blocked_threads_list_exclusive : m_blocked_threads_list_shared;
     }
-    void block(Thread&, Mode, ScopedSpinlock<Spinlock<u8>>&, u32);
+    void block(Thread&, Mode, SpinlockLocker<Spinlock<u8>>&, u32);
     void unblock_waiters(Mode);
     const char* m_name { nullptr };

@@ -116,15 +116,14 @@ private:
 };
 template<typename LockType>
-class [[nodiscard]] ScopedSpinlock {
-
-    AK_MAKE_NONCOPYABLE(ScopedSpinlock);
+class [[nodiscard]] SpinlockLocker {
+    AK_MAKE_NONCOPYABLE(SpinlockLocker);
 public:
-    ScopedSpinlock() = delete;
-    ScopedSpinlock& operator=(ScopedSpinlock&&) = delete;
-    ScopedSpinlock(LockType& lock)
+    SpinlockLocker() = delete;
+    SpinlockLocker& operator=(SpinlockLocker&&) = delete;
+    SpinlockLocker(LockType& lock)
         : m_lock(&lock)
     {
         VERIFY(m_lock);
@@ -132,7 +131,7 @@ public:
         m_have_lock = true;
     }
-    ScopedSpinlock(ScopedSpinlock&& from)
+    SpinlockLocker(SpinlockLocker&& from)
         : m_lock(from.m_lock)
         , m_prev_flags(from.m_prev_flags)
         , m_have_lock(from.m_have_lock)
@@ -142,7 +141,7 @@ public:
         from.m_have_lock = false;
     }
-    ~ScopedSpinlock()
+    ~SpinlockLocker()
    {
        if (m_lock && m_have_lock) {
            m_lock->unlock(m_prev_flags);

@@ -39,7 +39,7 @@ private:
     private:
         U& m_value;
-        ScopedSpinlock<RecursiveSpinlock> m_locker;
+        SpinlockLocker<RecursiveSpinlock> m_locker;
     };
     auto lock_const() const { return Locked<T const>(m_value, m_spinlock); }

@@ -223,7 +223,7 @@ void AddressSpace::deallocate_region(Region& region)
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
@@ -235,7 +235,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
@@ -253,7 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
     if (!candidate)
         return nullptr;
@@ -265,7 +265,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto found_region = m_regions.find_largest_not_above(range.base().get());
     if (!found_region)
@@ -286,7 +286,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
 Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     auto success = m_regions.try_insert(region->vaddr().get(), move(region));
     return success ? ptr : nullptr;
 }
@@ -324,7 +324,7 @@ void AddressSpace::dump_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     for (auto& sorted_region : m_regions) {
         auto& region = *sorted_region;
@@ -342,13 +342,13 @@ void AddressSpace::dump_regions()
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     m_regions.clear();
 }
 size_t AddressSpace::amount_dirty_private() const
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
@@ -362,7 +362,7 @@ size_t AddressSpace::amount_dirty_private() const
 size_t AddressSpace::amount_clean_inode() const
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
for (auto& region : m_regions) { for (auto& region : m_regions) {
if (region->vmobject().is_inode()) if (region->vmobject().is_inode())
@ -376,7 +376,7 @@ size_t AddressSpace::amount_clean_inode() const
size_t AddressSpace::amount_virtual() const size_t AddressSpace::amount_virtual() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
size_t amount = 0; size_t amount = 0;
for (auto& region : m_regions) { for (auto& region : m_regions) {
amount += region->size(); amount += region->size();
@ -386,7 +386,7 @@ size_t AddressSpace::amount_virtual() const
size_t AddressSpace::amount_resident() const size_t AddressSpace::amount_resident() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page. // FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0; size_t amount = 0;
for (auto& region : m_regions) { for (auto& region : m_regions) {
@ -397,7 +397,7 @@ size_t AddressSpace::amount_resident() const
size_t AddressSpace::amount_shared() const size_t AddressSpace::amount_shared() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// FIXME: This will double count if multiple regions use the same physical page. // FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts, // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@ -411,7 +411,7 @@ size_t AddressSpace::amount_shared() const
size_t AddressSpace::amount_purgeable_volatile() const size_t AddressSpace::amount_purgeable_volatile() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
size_t amount = 0; size_t amount = 0;
for (auto& region : m_regions) { for (auto& region : m_regions) {
if (!region->vmobject().is_anonymous()) if (!region->vmobject().is_anonymous())
@ -425,7 +425,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
size_t AddressSpace::amount_purgeable_nonvolatile() const size_t AddressSpace::amount_purgeable_nonvolatile() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
size_t amount = 0; size_t amount = 0;
for (auto& region : m_regions) { for (auto& region : m_regions) {
if (!region->vmobject().is_anonymous()) if (!region->vmobject().is_anonymous())


@ -16,7 +16,7 @@ namespace Kernel::Memory {
KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone() KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
{ {
// We need to acquire our lock so we copy a sane state // We need to acquire our lock so we copy a sane state
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (is_purgeable() && is_volatile()) { if (is_purgeable() && is_volatile()) {
// If this object is purgeable+volatile, create a new zero-filled purgeable+volatile // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
@ -178,7 +178,7 @@ AnonymousVMObject::~AnonymousVMObject()
size_t AnonymousVMObject::purge() size_t AnonymousVMObject::purge()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!is_purgeable() || !is_volatile()) if (!is_purgeable() || !is_volatile())
return 0; return 0;
@ -206,7 +206,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
{ {
VERIFY(is_purgeable()); VERIFY(is_purgeable());
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
was_purged = m_was_purged; was_purged = m_was_purged;
if (m_volatile == is_volatile) if (m_volatile == is_volatile)
@ -306,7 +306,7 @@ size_t AnonymousVMObject::cow_pages() const
PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr) PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
{ {
VERIFY_INTERRUPTS_DISABLED(); VERIFY_INTERRUPTS_DISABLED();
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (is_volatile()) { if (is_volatile()) {
// A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash. // A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash.
@ -379,13 +379,13 @@ AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one() NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
{ {
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
return m_committed_pages.take_one(); return m_committed_pages.take_one();
} }
void AnonymousVMObject::SharedCommittedCowPages::uncommit_one() void AnonymousVMObject::SharedCommittedCowPages::uncommit_one()
{ {
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
m_committed_pages.uncommit_one(); m_committed_pages.uncommit_one();
} }


@ -52,7 +52,7 @@ size_t InodeVMObject::amount_dirty() const
int InodeVMObject::release_all_clean_pages() int InodeVMObject::release_all_clean_pages()
{ {
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
int count = 0; int count = 0;
for (size_t i = 0; i < page_count(); ++i) { for (size_t i = 0; i < page_count(); ++i) {


@ -63,7 +63,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
{ {
s_the = this; s_the = this;
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
parse_memory_map(); parse_memory_map();
write_cr3(kernel_page_directory().cr3()); write_cr3(kernel_page_directory().cr3());
protect_kernel_image(); protect_kernel_image();
@ -88,7 +88,7 @@ UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image() UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
{ {
ScopedSpinlock page_lock(kernel_page_directory().get_lock()); SpinlockLocker page_lock(kernel_page_directory().get_lock());
// Disable writing to the kernel text and rodata segments. // Disable writing to the kernel text and rodata segments.
for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) { for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i)); auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory() UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{ {
ScopedSpinlock page_lock(kernel_page_directory().get_lock()); SpinlockLocker page_lock(kernel_page_directory().get_lock());
ScopedSpinlock mm_lock(s_mm_lock); SpinlockLocker mm_lock(s_mm_lock);
// Disable writing to the .ro_after_init section // Disable writing to the .ro_after_init section
for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) { for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i)); auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
void MemoryManager::unmap_text_after_init() void MemoryManager::unmap_text_after_init()
{ {
ScopedSpinlock page_lock(kernel_page_directory().get_lock()); SpinlockLocker page_lock(kernel_page_directory().get_lock());
ScopedSpinlock mm_lock(s_mm_lock); SpinlockLocker mm_lock(s_mm_lock);
auto start = page_round_down((FlatPtr)&start_of_unmap_after_init); auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
auto end = page_round_up((FlatPtr)&end_of_unmap_after_init); auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
@ -135,8 +135,8 @@ void MemoryManager::unmap_text_after_init()
void MemoryManager::unmap_ksyms_after_init() void MemoryManager::unmap_ksyms_after_init()
{ {
ScopedSpinlock mm_lock(s_mm_lock); SpinlockLocker mm_lock(s_mm_lock);
ScopedSpinlock page_lock(kernel_page_directory().get_lock()); SpinlockLocker page_lock(kernel_page_directory().get_lock());
auto start = page_round_down((FlatPtr)start_of_kernel_ksyms); auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
auto end = page_round_up((FlatPtr)end_of_kernel_ksyms); auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
@ -413,7 +413,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// try to map the entire region into kernel space so we always have it // try to map the entire region into kernel space so we always have it
// We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
// mapped yet so we can't create them // mapped yet so we can't create them
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
// Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
auto page_tables_base = m_physical_pages_region->lower(); auto page_tables_base = m_physical_pages_region->lower();
@ -612,7 +612,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr) Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
for (auto& region : MM.m_kernel_regions) { for (auto& region : MM.m_kernel_regions) {
if (region.contains(vaddr)) if (region.contains(vaddr))
return &region; return &region;
@ -628,7 +628,7 @@ Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space,
Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr) Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
{ {
ScopedSpinlock lock(space.get_lock()); SpinlockLocker lock(space.get_lock());
return find_user_region_from_vaddr_no_lock(space, vaddr); return find_user_region_from_vaddr_no_lock(space, vaddr);
} }
@ -636,7 +636,7 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register
{ {
// We take the space lock once here and then use the no_lock variants // We take the space lock once here and then use the no_lock variants
// to avoid excessive spinlock recursion in this extemely common path. // to avoid excessive spinlock recursion in this extemely common path.
ScopedSpinlock lock(space.get_lock()); SpinlockLocker lock(space.get_lock());
auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) { auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
lock.unlock(); lock.unlock();
@ -702,7 +702,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{ {
VERIFY(!(size % PAGE_SIZE)); VERIFY(!(size % PAGE_SIZE));
ScopedSpinlock lock(kernel_page_directory().get_lock()); SpinlockLocker lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value()) if (!range.has_value())
return {}; return {};
@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy); auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
if (maybe_vm_object.is_error()) if (maybe_vm_object.is_error())
return {}; return {};
ScopedSpinlock lock(kernel_page_directory().get_lock()); SpinlockLocker lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value()) if (!range.has_value())
return {}; return {};
@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
if (maybe_vm_object.is_error()) if (maybe_vm_object.is_error())
return {}; return {};
VERIFY(!(size % PAGE_SIZE)); VERIFY(!(size % PAGE_SIZE));
ScopedSpinlock lock(kernel_page_directory().get_lock()); SpinlockLocker lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value()) if (!range.has_value())
return {}; return {};
@ -755,7 +755,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{ {
VERIFY(!(size % PAGE_SIZE)); VERIFY(!(size % PAGE_SIZE));
ScopedSpinlock lock(kernel_page_directory().get_lock()); SpinlockLocker lock(kernel_page_directory().get_lock());
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value()) if (!range.has_value())
return {}; return {};
@ -765,7 +765,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count) Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
{ {
VERIFY(page_count > 0); VERIFY(page_count > 0);
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
if (m_system_memory_info.user_physical_pages_uncommitted < page_count) if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
return {}; return {};
@ -778,7 +778,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
{ {
VERIFY(page_count > 0); VERIFY(page_count > 0);
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count); VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
m_system_memory_info.user_physical_pages_uncommitted += page_count; m_system_memory_info.user_physical_pages_uncommitted += page_count;
@ -787,7 +787,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr) void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
// Are we returning a user page? // Are we returning a user page?
for (auto& region : m_user_physical_regions) { for (auto& region : m_user_physical_regions) {
@ -839,7 +839,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill) NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
auto page = find_free_user_physical_page(true); auto page = find_free_user_physical_page(true);
if (should_zero_fill == ShouldZeroFill::Yes) { if (should_zero_fill == ShouldZeroFill::Yes) {
auto* ptr = quickmap_page(*page); auto* ptr = quickmap_page(*page);
@ -851,7 +851,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge) RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
auto page = find_free_user_physical_page(false); auto page = find_free_user_physical_page(false);
bool purged_pages = false; bool purged_pages = false;
@ -893,7 +893,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size) NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
{ {
VERIFY(!(size % PAGE_SIZE)); VERIFY(!(size % PAGE_SIZE));
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE)); size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count); auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count);
@ -911,7 +911,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page() RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
auto page = m_super_physical_region->take_free_page(); auto page = m_super_physical_region->take_free_page();
if (!page) { if (!page) {
@ -934,7 +934,7 @@ void MemoryManager::enter_space(AddressSpace& space)
{ {
auto current_thread = Thread::current(); auto current_thread = Thread::current();
VERIFY(current_thread != nullptr); VERIFY(current_thread != nullptr);
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
current_thread->regs().cr3 = space.page_directory().cr3(); current_thread->regs().cr3 = space.page_directory().cr3();
write_cr3(space.page_directory().cr3()); write_cr3(space.page_directory().cr3());
@ -1006,7 +1006,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
VERIFY_INTERRUPTS_DISABLED(); VERIFY_INTERRUPTS_DISABLED();
auto& mm_data = get_data(); auto& mm_data = get_data();
mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock(); mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE); VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE; u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
void MemoryManager::unquickmap_page() void MemoryManager::unquickmap_page()
{ {
VERIFY_INTERRUPTS_DISABLED(); VERIFY_INTERRUPTS_DISABLED();
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
auto& mm_data = get_data(); auto& mm_data = get_data();
VERIFY(mm_data.m_quickmap_in_use.is_locked()); VERIFY(mm_data.m_quickmap_in_use.is_locked());
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE); VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
@ -1049,20 +1049,20 @@ bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddr
bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
{ {
ScopedSpinlock lock(space.get_lock()); SpinlockLocker lock(space.get_lock());
return validate_user_stack_no_lock(space, vaddr); return validate_user_stack_no_lock(space, vaddr);
} }
void MemoryManager::register_region(Region& region) void MemoryManager::register_region(Region& region)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
if (region.is_kernel()) if (region.is_kernel())
m_kernel_regions.append(region); m_kernel_regions.append(region);
} }
void MemoryManager::unregister_region(Region& region) void MemoryManager::unregister_region(Region& region)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
if (region.is_kernel()) if (region.is_kernel())
m_kernel_regions.remove(region); m_kernel_regions.remove(region);
} }
@ -1077,7 +1077,7 @@ void MemoryManager::dump_kernel_regions()
#endif #endif
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME", dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding); addr_padding, addr_padding, addr_padding);
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
for (auto& region : m_kernel_regions) { for (auto& region : m_kernel_regions) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
region.vaddr().get(), region.vaddr().get(),
@ -1095,8 +1095,8 @@ void MemoryManager::dump_kernel_regions()
void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable) void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{ {
ScopedSpinlock page_lock(kernel_page_directory().get_lock()); SpinlockLocker page_lock(kernel_page_directory().get_lock());
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
auto* pte = ensure_pte(kernel_page_directory(), vaddr); auto* pte = ensure_pte(kernel_page_directory(), vaddr);
VERIFY(pte); VERIFY(pte);
if (pte->is_writable() == writable) if (pte->is_writable() == writable)

View file

@ -197,7 +197,7 @@ public:
SystemMemoryInfo get_system_memory_info() SystemMemoryInfo get_system_memory_info()
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
return m_system_memory_info; return m_system_memory_info;
} }


@ -27,7 +27,7 @@ static HashMap<FlatPtr, PageDirectory*>& cr3_map()
RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3) RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
return cr3_map().get(cr3).value_or({}); return cr3_map().get(cr3).value_or({});
} }
@ -60,7 +60,7 @@ RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAlloca
} }
// NOTE: Take the MM lock since we need it for quickmap. // NOTE: Take the MM lock since we need it for quickmap.
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
#if ARCH(X86_64) #if ARCH(X86_64)
directory->m_pml4t = MM.allocate_user_physical_page(); directory->m_pml4t = MM.allocate_user_physical_page();
@ -159,7 +159,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
PageDirectory::~PageDirectory() PageDirectory::~PageDirectory()
{ {
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
if (m_space) if (m_space)
cr3_map().remove(cr3()); cr3_map().remove(cr3());
} }


@ -43,8 +43,8 @@ Region::~Region()
MM.unregister_region(*this); MM.unregister_region(*this);
if (m_page_directory) { if (m_page_directory) {
ScopedSpinlock page_lock(m_page_directory->get_lock()); SpinlockLocker page_lock(m_page_directory->get_lock());
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
unmap(ShouldDeallocateVirtualRange::Yes); unmap(ShouldDeallocateVirtualRange::Yes);
VERIFY(!m_page_directory); VERIFY(!m_page_directory);
} }
@ -183,7 +183,7 @@ bool Region::map_individual_page_impl(size_t page_index)
} }
// NOTE: We have to take the MM lock for PTE's to stay valid while we use them. // NOTE: We have to take the MM lock for PTE's to stay valid while we use them.
ScopedSpinlock mm_locker(s_mm_lock); SpinlockLocker mm_locker(s_mm_lock);
auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr); auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
if (!pte) if (!pte)
@ -208,12 +208,12 @@ bool Region::map_individual_page_impl(size_t page_index)
bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush) bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
{ {
ScopedSpinlock lock(vmobject().m_lock); SpinlockLocker lock(vmobject().m_lock);
if (!m_page_directory) if (!m_page_directory)
return true; // not an error, region may have not yet mapped it return true; // not an error, region may have not yet mapped it
if (!translate_vmobject_page(page_index)) if (!translate_vmobject_page(page_index))
return true; // not an error, region doesn't map this page return true; // not an error, region doesn't map this page
ScopedSpinlock page_lock(m_page_directory->get_lock()); SpinlockLocker page_lock(m_page_directory->get_lock());
VERIFY(physical_page(page_index)); VERIFY(physical_page(page_index));
bool success = map_individual_page_impl(page_index); bool success = map_individual_page_impl(page_index);
if (with_flush) if (with_flush)
@ -236,8 +236,8 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
{ {
if (!m_page_directory) if (!m_page_directory)
return; return;
ScopedSpinlock page_lock(m_page_directory->get_lock()); SpinlockLocker page_lock(m_page_directory->get_lock());
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
size_t count = page_count(); size_t count = page_count();
for (size_t i = 0; i < count; ++i) { for (size_t i = 0; i < count; ++i) {
auto vaddr = vaddr_from_page_index(i); auto vaddr = vaddr_from_page_index(i);
@ -259,8 +259,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb) bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
{ {
ScopedSpinlock page_lock(page_directory.get_lock()); SpinlockLocker page_lock(page_directory.get_lock());
ScopedSpinlock lock(s_mm_lock); SpinlockLocker lock(s_mm_lock);
// FIXME: Find a better place for this sanity check(?) // FIXME: Find a better place for this sanity check(?)
if (is_user() && !is_shared()) { if (is_user() && !is_shared()) {
@ -338,7 +338,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
auto& page_slot = physical_page_slot(page_index_in_region); auto& page_slot = physical_page_slot(page_index_in_region);
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region); auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
ScopedSpinlock locker(vmobject().m_lock); SpinlockLocker locker(vmobject().m_lock);
if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) { if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!"); dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
@ -401,7 +401,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject]; auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
{ {
ScopedSpinlock locker(inode_vmobject.m_lock); SpinlockLocker locker(inode_vmobject.m_lock);
if (!vmobject_physical_page_entry.is_null()) { if (!vmobject_physical_page_entry.is_null()) {
dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping."); dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
if (!remap_vmobject_page(page_index_in_vmobject)) if (!remap_vmobject_page(page_index_in_vmobject))
@ -433,7 +433,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
memset(page_buffer + nread, 0, PAGE_SIZE - nread); memset(page_buffer + nread, 0, PAGE_SIZE - nread);
} }
ScopedSpinlock locker(inode_vmobject.m_lock); SpinlockLocker locker(inode_vmobject.m_lock);
if (!vmobject_physical_page_entry.is_null()) { if (!vmobject_physical_page_entry.is_null()) {
// Someone else faulted in this page while we were reading from the inode. // Someone else faulted in this page while we were reading from the inode.


@ -43,13 +43,13 @@ public:
ALWAYS_INLINE void add_region(Region& region) ALWAYS_INLINE void add_region(Region& region)
{ {
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
m_regions.append(region); m_regions.append(region);
} }
ALWAYS_INLINE void remove_region(Region& region) ALWAYS_INLINE void remove_region(Region& region)
{ {
ScopedSpinlock locker(m_lock); SpinlockLocker locker(m_lock);
m_regions.remove(region); m_regions.remove(region);
} }
@ -80,7 +80,7 @@ public:
template<typename Callback> template<typename Callback>
inline void VMObject::for_each_region(Callback callback) inline void VMObject::for_each_region(Callback callback)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
for (auto& region : m_regions) { for (auto& region : m_regions) {
callback(region); callback(region);
} }


@ -25,7 +25,7 @@ void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t si
void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator) void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{ {
ScopedSpinlock lock(parent_allocator.m_lock); SpinlockLocker lock(parent_allocator.m_lock);
m_total_range = parent_allocator.m_total_range; m_total_range = parent_allocator.m_total_range;
m_available_ranges.clear(); m_available_ranges.clear();
for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) { for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
@ -103,7 +103,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
if (Checked<size_t>::addition_would_overflow(effective_size, alignment)) if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
return {}; return {};
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) { for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
auto& available_range = *it; auto& available_range = *it;
@ -142,7 +142,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
return {}; return {};
} }
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) { for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
auto& available_range = *it; auto& available_range = *it;
if (!available_range.contains(base, size)) if (!available_range.contains(base, size))
@ -159,7 +159,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
void VirtualRangeAllocator::deallocate(VirtualRange const& range) void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(m_total_range.contains(range)); VERIFY(m_total_range.contains(range));
VERIFY(range.size()); VERIFY(range.size());
VERIFY((range.size() % PAGE_SIZE) == 0); VERIFY((range.size() % PAGE_SIZE) == 0);


@ -34,7 +34,7 @@ public:
return false; return false;
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
m_did_unblock = true; m_did_unblock = true;
@ -97,7 +97,7 @@ void ARPTableBlocker::not_blocking(bool timeout_in_past)
return table.get(ip_addr()); return table.get(ip_addr());
}); });
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!m_did_unblock) { if (!m_did_unblock) {
m_did_unblock = true; m_did_unblock = true;
m_addr = move(addr); m_addr = move(addr);


@ -282,7 +282,7 @@ OwnPtr<PerformanceEventBuffer> PerformanceEventBuffer::try_create_with_size(size
void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type) void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type)
{ {
ScopedSpinlock locker(process.address_space().get_lock()); SpinlockLocker locker(process.address_space().get_lock());
String executable; String executable;
if (process.executable()) if (process.executable())


@ -205,7 +205,7 @@ RefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, Str
if (do_register == RegisterProcess::Yes) if (do_register == RegisterProcess::Yes)
register_new(*process); register_new(*process);
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
first_thread->set_affinity(affinity); first_thread->set_affinity(affinity);
first_thread->set_state(Thread::State::Runnable); first_thread->set_state(Thread::State::Runnable);
return process; return process;
@ -429,7 +429,7 @@ RefPtr<Process> Process::from_pid(ProcessID pid)
const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) const const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) const
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
if (m_fds_metadatas.size() <= i) if (m_fds_metadatas.size() <= i)
return nullptr; return nullptr;
@ -440,7 +440,7 @@ const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(
} }
Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i)
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
if (m_fds_metadatas.size() <= i) if (m_fds_metadatas.size() <= i)
return nullptr; return nullptr;
@ -452,20 +452,20 @@ Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t
const Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) const const Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) const
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
VERIFY(m_fds_metadatas[i].is_allocated()); VERIFY(m_fds_metadatas[i].is_allocated());
return m_fds_metadatas[i]; return m_fds_metadatas[i];
} }
Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i)
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
VERIFY(m_fds_metadatas[i].is_allocated()); VERIFY(m_fds_metadatas[i].is_allocated());
return m_fds_metadatas[i]; return m_fds_metadatas[i];
} }
RefPtr<FileDescription> Process::FileDescriptions::file_description(int fd) const RefPtr<FileDescription> Process::FileDescriptions::file_description(int fd) const
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
if (fd < 0) if (fd < 0)
return nullptr; return nullptr;
if (static_cast<size_t>(fd) < m_fds_metadatas.size()) if (static_cast<size_t>(fd) < m_fds_metadatas.size())
@ -475,7 +475,7 @@ RefPtr<FileDescription> Process::FileDescriptions::file_description(int fd) cons
void Process::FileDescriptions::enumerate(Function<void(const FileDescriptionAndFlags&)> callback) const void Process::FileDescriptions::enumerate(Function<void(const FileDescriptionAndFlags&)> callback) const
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
for (auto& file_description_metadata : m_fds_metadatas) { for (auto& file_description_metadata : m_fds_metadatas) {
callback(file_description_metadata); callback(file_description_metadata);
} }
@ -483,7 +483,7 @@ void Process::FileDescriptions::enumerate(Function<void(const FileDescriptionAnd
void Process::FileDescriptions::change_each(Function<void(FileDescriptionAndFlags&)> callback) void Process::FileDescriptions::change_each(Function<void(FileDescriptionAndFlags&)> callback)
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
for (auto& file_description_metadata : m_fds_metadatas) { for (auto& file_description_metadata : m_fds_metadatas) {
callback(file_description_metadata); callback(file_description_metadata);
} }
@ -501,7 +501,7 @@ size_t Process::FileDescriptions::open_count() const
KResultOr<Process::ScopedDescriptionAllocation> Process::FileDescriptions::allocate(int first_candidate_fd) KResultOr<Process::ScopedDescriptionAllocation> Process::FileDescriptions::allocate(int first_candidate_fd)
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
for (size_t i = first_candidate_fd; i < max_open(); ++i) { for (size_t i = first_candidate_fd; i < max_open(); ++i) {
if (!m_fds_metadatas[i].is_allocated()) { if (!m_fds_metadatas[i].is_allocated()) {
m_fds_metadatas[i].allocate(); m_fds_metadatas[i].allocate();
@ -771,7 +771,7 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_d
regs.set_ip((FlatPtr)entry); regs.set_ip((FlatPtr)entry);
regs.set_sp((FlatPtr)entry_data); // entry function argument is expected to be in the SP register regs.set_sp((FlatPtr)entry_data); // entry function argument is expected to be in the SP register
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
thread->set_state(Thread::State::Runnable); thread->set_state(Thread::State::Runnable);
return thread; return thread;
} }


@ -636,7 +636,7 @@ public:
KResult try_clone(const Kernel::Process::FileDescriptions& other) KResult try_clone(const Kernel::Process::FileDescriptions& other)
{ {
ScopedSpinlock lock_other(other.m_fds_lock); SpinlockLocker lock_other(other.m_fds_lock);
if (!try_resize(other.m_fds_metadatas.size())) if (!try_resize(other.m_fds_metadatas.size()))
return ENOMEM; return ENOMEM;
@ -667,7 +667,7 @@ public:
void clear() void clear()
{ {
ScopedSpinlock lock(m_fds_lock); SpinlockLocker lock(m_fds_lock);
m_fds_metadatas.clear(); m_fds_metadatas.clear();
} }


@ -71,7 +71,7 @@ InodeIndex build_segmented_index_for_file_description(ProcessID pid, unsigned fd
static size_t s_allocate_global_inode_index() static size_t s_allocate_global_inode_index()
{ {
ScopedSpinlock lock(s_index_lock); SpinlockLocker lock(s_index_lock);
s_next_inode_index = s_next_inode_index.value() + 1; s_next_inode_index = s_next_inode_index.value() + 1;
// Note: Global ProcFS indices must be above 0 and up to maximum of what 36 bit (2 ^ 36 - 1) can represent. // Note: Global ProcFS indices must be above 0 and up to maximum of what 36 bit (2 ^ 36 - 1) can represent.
VERIFY(s_next_inode_index > 0); VERIFY(s_next_inode_index > 0);


@ -211,7 +211,7 @@ KResult Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) const
{ {
JsonArraySerializer array { builder }; JsonArraySerializer array { builder };
{ {
ScopedSpinlock lock(address_space().get_lock()); SpinlockLocker lock(address_space().get_lock());
for (auto& region : address_space().regions()) { for (auto& region : address_space().regions()) {
if (!region->is_user() && !Process::current().is_superuser()) if (!region->is_user() && !Process::current().is_superuser())
continue; continue;


@ -70,7 +70,7 @@ UNMAP_AFTER_INIT KernelRng::KernelRng()
void KernelRng::wait_for_entropy() void KernelRng::wait_for_entropy()
{ {
ScopedSpinlock lock(get_lock()); SpinlockLocker lock(get_lock());
if (!resource().is_ready()) { if (!resource().is_ready()) {
dbgln("Entropy starvation..."); dbgln("Entropy starvation...");
m_seed_queue.wait_forever("KernelRng"); m_seed_queue.wait_forever("KernelRng");


@ -37,7 +37,7 @@ public:
bool get_random_bytes(u8* buffer, size_t n) bool get_random_bytes(u8* buffer, size_t n)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!is_ready()) if (!is_ready())
return false; return false;
if (m_p0_len >= reseed_threshold) { if (m_p0_len >= reseed_threshold) {
@ -156,7 +156,7 @@ public:
void add_random_event(const T& event_data) void add_random_event(const T& event_data)
{ {
auto& kernel_rng = KernelRng::the(); auto& kernel_rng = KernelRng::the();
ScopedSpinlock lock(kernel_rng.get_lock()); SpinlockLocker lock(kernel_rng.get_lock());
// We don't lock this because on the off chance a pool is corrupted, entropy isn't lost. // We don't lock this because on the off chance a pool is corrupted, entropy isn't lost.
Event<T> event = { read_tsc(), m_source, event_data }; Event<T> event = { read_tsc(), m_source, event_data };
kernel_rng.resource().add_random_event(event, m_pool); kernel_rng.resource().add_random_event(event, m_pool);


@ -227,7 +227,7 @@ bool Scheduler::pick_next()
scheduler_data.in_scheduler = false; scheduler_data.in_scheduler = false;
}); });
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
if constexpr (SCHEDULER_RUNNABLE_DEBUG) { if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
dump_thread_list(); dump_thread_list();
@ -347,7 +347,7 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
// Check if we have any signals we should deliver (even if we don't // Check if we have any signals we should deliver (even if we don't
// end up switching to another thread). // end up switching to another thread).
if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) { if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) {
ScopedSpinlock lock(current_thread->get_lock()); SpinlockLocker lock(current_thread->get_lock());
if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) { if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
current_thread->dispatch_one_pending_signal(); current_thread->dispatch_one_pending_signal();
} }
@ -485,7 +485,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
} }
if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) { if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread); dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
current_thread->set_state(Thread::Dying); current_thread->set_state(Thread::Dying);
Processor::current().invoke_scheduler_async(); Processor::current().invoke_scheduler_async();


@ -124,7 +124,7 @@ bool AHCIPort::is_interrupts_enabled() const
void AHCIPort::recover_from_fatal_error() void AHCIPort::recover_from_fatal_error()
{ {
MutexLocker locker(m_lock); MutexLocker locker(m_lock);
ScopedSpinlock lock(m_hard_lock); SpinlockLocker lock(m_hard_lock);
dmesgln("{}: AHCI Port {} fatal error, shutting down!", m_parent_handler->hba_controller()->pci_address(), representative_port_index()); dmesgln("{}: AHCI Port {} fatal error, shutting down!", m_parent_handler->hba_controller()->pci_address(), representative_port_index());
dmesgln("{}: AHCI Port {} fatal error, SError {}", m_parent_handler->hba_controller()->pci_address(), representative_port_index(), (u32)m_port_registers.serr); dmesgln("{}: AHCI Port {} fatal error, SError {}", m_parent_handler->hba_controller()->pci_address(), representative_port_index(), (u32)m_port_registers.serr);
stop_command_list_processing(); stop_command_list_processing();
@ -208,7 +208,7 @@ void AHCIPort::eject()
bool AHCIPort::reset() bool AHCIPort::reset()
{ {
MutexLocker locker(m_lock); MutexLocker locker(m_lock);
ScopedSpinlock lock(m_hard_lock); SpinlockLocker lock(m_hard_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index()); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index());
@ -233,12 +233,12 @@ bool AHCIPort::reset()
bool AHCIPort::initialize_without_reset() bool AHCIPort::initialize_without_reset()
{ {
MutexLocker locker(m_lock); MutexLocker locker(m_lock);
ScopedSpinlock lock(m_hard_lock); SpinlockLocker lock(m_hard_lock);
dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status()); dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
return initialize(lock); return initialize(lock);
} }
bool AHCIPort::initialize(ScopedSpinlock<Spinlock<u8>>& main_lock) bool AHCIPort::initialize(SpinlockLocker<Spinlock<u8>>& main_lock)
{ {
VERIFY(m_lock.is_locked()); VERIFY(m_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initialization. Signature = {:#08x}", representative_port_index(), static_cast<u32>(m_port_registers.sig)); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initialization. Signature = {:#08x}", representative_port_index(), static_cast<u32>(m_port_registers.sig));
@ -504,7 +504,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
VERIFY(is_operable()); VERIFY(is_operable());
VERIFY(m_lock.is_locked()); VERIFY(m_lock.is_locked());
VERIFY(m_current_scatter_list); VERIFY(m_current_scatter_list);
ScopedSpinlock lock(m_hard_lock); SpinlockLocker lock(m_hard_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {}", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {}", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count);
if (!spin_until_ready()) if (!spin_until_ready())
@ -591,7 +591,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
return true; return true;
} }
bool AHCIPort::identify_device(ScopedSpinlock<Spinlock<u8>>& main_lock) bool AHCIPort::identify_device(SpinlockLocker<Spinlock<u8>>& main_lock)
{ {
VERIFY(m_lock.is_locked()); VERIFY(m_lock.is_locked());
VERIFY(is_operable()); VERIFY(is_operable());
@ -654,7 +654,7 @@ bool AHCIPort::identify_device(ScopedSpinlock<Spinlock<u8>>& main_lock)
bool AHCIPort::shutdown() bool AHCIPort::shutdown()
{ {
MutexLocker locker(m_lock); MutexLocker locker(m_lock);
ScopedSpinlock lock(m_hard_lock); SpinlockLocker lock(m_hard_lock);
rebase(); rebase();
set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface); set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface);
return true; return true;
@ -740,7 +740,7 @@ void AHCIPort::stop_fis_receiving() const
m_port_registers.cmd = m_port_registers.cmd & 0xFFFFFFEF; m_port_registers.cmd = m_port_registers.cmd & 0xFFFFFFEF;
} }
bool AHCIPort::initiate_sata_reset(ScopedSpinlock<Spinlock<u8>>& main_lock) bool AHCIPort::initiate_sata_reset(SpinlockLocker<Spinlock<u8>>& main_lock)
{ {
VERIFY(m_lock.is_locked()); VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked()); VERIFY(m_hard_lock.is_locked());


@ -51,7 +51,7 @@ public:
private: private:
bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; } bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
bool initialize(ScopedSpinlock<Spinlock<u8>>&); bool initialize(SpinlockLocker<Spinlock<u8>>&);
UNMAP_AFTER_INIT AHCIPort(const AHCIPortHandler&, volatile AHCI::PortRegisters&, u32 port_index); UNMAP_AFTER_INIT AHCIPort(const AHCIPortHandler&, volatile AHCI::PortRegisters&, u32 port_index);
@ -62,7 +62,7 @@ private:
const char* try_disambiguate_sata_status(); const char* try_disambiguate_sata_status();
void try_disambiguate_sata_error(); void try_disambiguate_sata_error();
bool initiate_sata_reset(ScopedSpinlock<Spinlock<u8>>&); bool initiate_sata_reset(SpinlockLocker<Spinlock<u8>>&);
void rebase(); void rebase();
void recover_from_fatal_error(); void recover_from_fatal_error();
bool shutdown(); bool shutdown();
@ -79,7 +79,7 @@ private:
bool spin_until_ready() const; bool spin_until_ready() const;
bool identify_device(ScopedSpinlock<Spinlock<u8>>&); bool identify_device(SpinlockLocker<Spinlock<u8>>&);
ALWAYS_INLINE void start_command_list_processing() const; ALWAYS_INLINE void start_command_list_processing() const;
ALWAYS_INLINE void mark_command_header_ready_to_process(u8 command_header_index) const; ALWAYS_INLINE void mark_command_header_ready_to_process(u8 command_header_index) const;


@ -80,7 +80,7 @@ bool BMIDEChannel::handle_irq(const RegisterState&)
// clear bus master interrupt status // clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4); m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
ScopedSpinlock lock(m_request_lock); SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}", dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0, (status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0, (status & ATA_SR_BSY) != 0,
@ -116,7 +116,7 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
// before Processor::deferred_call_queue returns! // before Processor::deferred_call_queue returns!
g_io_work->queue([this, result]() { g_io_work->queue([this, result]() {
dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result); dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
ScopedSpinlock lock(m_request_lock); SpinlockLocker lock(m_request_lock);
VERIFY(m_current_request); VERIFY(m_current_request);
auto current_request = m_current_request; auto current_request = m_current_request;
m_current_request.clear(); m_current_request.clear();
@ -146,7 +146,7 @@ void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
VERIFY(!m_current_request.is_null()); VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256); VERIFY(m_current_request->block_count() <= 256);
ScopedSpinlock m_lock(m_request_lock); SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count()); dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
prdt().offset = m_dma_buffer_page->paddr().get(); prdt().offset = m_dma_buffer_page->paddr().get();
@ -194,7 +194,7 @@ void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
VERIFY(!m_current_request.is_null()); VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256); VERIFY(m_current_request->block_count() <= 256);
ScopedSpinlock m_lock(m_request_lock); SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count()); dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
// Note: This is a fix for a quirk for an IDE controller on ICH7 machine. // Note: This is a fix for a quirk for an IDE controller on ICH7 machine.


@ -197,7 +197,7 @@ bool IDEChannel::handle_irq(const RegisterState&)
m_entropy_source.add_random_event(status); m_entropy_source.add_random_event(status);
ScopedSpinlock lock(m_request_lock); SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}", dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0, (status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0, (status & ATA_SR_BSY) != 0,
@ -223,7 +223,7 @@ bool IDEChannel::handle_irq(const RegisterState&)
// trigger page faults // trigger page faults
g_io_work->queue([this]() { g_io_work->queue([this]() {
MutexLocker locker(m_lock); MutexLocker locker(m_lock);
ScopedSpinlock lock(m_request_lock); SpinlockLocker lock(m_request_lock);
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) { if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count()); dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
@ -498,7 +498,7 @@ void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
VERIFY(!m_current_request.is_null()); VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256); VERIFY(m_current_request->block_count() <= 256);
ScopedSpinlock m_lock(m_request_lock); SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors"); dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index()); dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities); ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
@ -536,7 +536,7 @@ void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
VERIFY(!m_current_request.is_null()); VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256); VERIFY(m_current_request->block_count() <= 256);
ScopedSpinlock m_lock(m_request_lock); SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index()); dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities); ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
ata_do_write_sector(); ata_do_write_sector();
View file
@ -682,7 +682,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
} }
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
new_main_thread->set_state(Thread::State::Runnable); new_main_thread->set_state(Thread::State::Runnable);
} }
u32 lock_count_to_restore; u32 lock_count_to_restore;
View file
@ -93,7 +93,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
#endif #endif
{ {
ScopedSpinlock lock(address_space().get_lock()); SpinlockLocker lock(address_space().get_lock());
for (auto& region : address_space().regions()) { for (auto& region : address_space().regions()) {
dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr()); dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
auto maybe_region_clone = region->try_clone(); auto maybe_region_clone = region->try_clone();
@ -120,7 +120,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
PerformanceManager::add_process_created_event(*child); PerformanceManager::add_process_created_event(*child);
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
child_first_thread->set_affinity(Thread::current()->affinity()); child_first_thread->set_affinity(Thread::current()->affinity());
child_first_thread->set_state(Thread::State::Runnable); child_first_thread->set_state(Thread::State::Runnable);
View file
@ -13,7 +13,7 @@ namespace Kernel {
void Process::clear_futex_queues_on_exec() void Process::clear_futex_queues_on_exec()
{ {
ScopedSpinlock lock(m_futex_lock); SpinlockLocker lock(m_futex_lock);
for (auto& it : m_futex_queues) { for (auto& it : m_futex_queues) {
bool did_wake_all; bool did_wake_all;
it.value->wake_all(did_wake_all); it.value->wake_all(did_wake_all);
@ -88,7 +88,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
auto do_wake = [&](FlatPtr user_address, u32 count, Optional<u32> bitmask) -> int { auto do_wake = [&](FlatPtr user_address, u32 count, Optional<u32> bitmask) -> int {
if (count == 0) if (count == 0)
return 0; return 0;
ScopedSpinlock locker(m_futex_lock); SpinlockLocker locker(m_futex_lock);
auto futex_queue = find_futex_queue(user_address, false); auto futex_queue = find_futex_queue(user_address, false);
if (!futex_queue) if (!futex_queue)
return 0; return 0;
@ -117,7 +117,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
} }
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire); atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
ScopedSpinlock locker(m_futex_lock); SpinlockLocker locker(m_futex_lock);
did_create = false; did_create = false;
futex_queue = find_futex_queue(user_address, true, &did_create); futex_queue = find_futex_queue(user_address, true, &did_create);
VERIFY(futex_queue); VERIFY(futex_queue);
@ -130,7 +130,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset); Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset);
ScopedSpinlock locker(m_futex_lock); SpinlockLocker locker(m_futex_lock);
if (futex_queue->is_empty_and_no_imminent_waits()) { if (futex_queue->is_empty_and_no_imminent_waits()) {
// If there are no more waiters, we want to get rid of the futex! // If there are no more waiters, we want to get rid of the futex!
remove_futex_queue(user_address); remove_futex_queue(user_address);
@ -150,7 +150,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire); atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
int woken_or_requeued = 0; int woken_or_requeued = 0;
ScopedSpinlock locker(m_futex_lock); SpinlockLocker locker(m_futex_lock);
if (auto futex_queue = find_futex_queue(user_address, false)) { if (auto futex_queue = find_futex_queue(user_address, false)) {
RefPtr<FutexQueue> target_futex_queue; RefPtr<FutexQueue> target_futex_queue;
bool is_empty, is_target_empty; bool is_empty, is_target_empty;
View file
@ -31,7 +31,7 @@ KResultOr<FlatPtr> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
else else
g_global_perf_events = PerformanceEventBuffer::try_create_with_size(32 * MiB).leak_ptr(); g_global_perf_events = PerformanceEventBuffer::try_create_with_size(32 * MiB).leak_ptr();
ScopedSpinlock lock(g_profiling_lock); SpinlockLocker lock(g_profiling_lock);
if (!TimeManagement::the().enable_profile_timer()) if (!TimeManagement::the().enable_profile_timer())
return ENOTSUP; return ENOTSUP;
g_profiling_all_threads = true; g_profiling_all_threads = true;
@ -51,7 +51,7 @@ KResultOr<FlatPtr> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
return ESRCH; return ESRCH;
if (!is_superuser() && process->uid() != euid()) if (!is_superuser() && process->uid() != euid())
return EPERM; return EPERM;
ScopedSpinlock lock(g_profiling_lock); SpinlockLocker lock(g_profiling_lock);
g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP; g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
process->set_profiling(true); process->set_profiling(true);
if (!process->create_perf_events_buffer_if_needed()) { if (!process->create_perf_events_buffer_if_needed()) {
@ -86,7 +86,7 @@ KResultOr<FlatPtr> Process::sys$profiling_disable(pid_t pid)
return ESRCH; return ESRCH;
if (!is_superuser() && process->uid() != euid()) if (!is_superuser() && process->uid() != euid())
return EPERM; return EPERM;
ScopedSpinlock lock(g_profiling_lock); SpinlockLocker lock(g_profiling_lock);
if (!process->is_profiling()) if (!process->is_profiling())
return EINVAL; return EINVAL;
// FIXME: If we enabled the profile timer and it's not supported, how do we disable it now? // FIXME: If we enabled the profile timer and it's not supported, how do we disable it now?
@ -122,7 +122,7 @@ KResultOr<FlatPtr> Process::sys$profiling_free_buffer(pid_t pid)
return ESRCH; return ESRCH;
if (!is_superuser() && process->uid() != euid()) if (!is_superuser() && process->uid() != euid())
return EPERM; return EPERM;
ScopedSpinlock lock(g_profiling_lock); SpinlockLocker lock(g_profiling_lock);
if (process->is_profiling()) if (process->is_profiling())
return EINVAL; return EINVAL;
process->delete_perf_events_buffer(); process->delete_perf_events_buffer();
View file
@ -18,7 +18,7 @@ namespace Kernel {
static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& params, Process& caller) static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& params, Process& caller)
{ {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
if (params.request == PT_TRACE_ME) { if (params.request == PT_TRACE_ME) {
if (Process::current().tracer()) if (Process::current().tracer())
return EBUSY; return EBUSY;
@ -55,7 +55,7 @@ static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par
auto result = peer_process.start_tracing_from(caller.pid()); auto result = peer_process.start_tracing_from(caller.pid());
if (result.is_error()) if (result.is_error())
return result.error(); return result.error();
ScopedSpinlock lock(peer->get_lock()); SpinlockLocker lock(peer->get_lock());
if (peer->state() != Thread::State::Stopped) { if (peer->state() != Thread::State::Stopped) {
peer->send_signal(SIGSTOP, &caller); peer->send_signal(SIGSTOP, &caller);
} }
View file
@ -28,7 +28,7 @@ KResultOr<FlatPtr> Process::sys$sched_setparam(int pid, Userspace<const struct s
return EINVAL; return EINVAL;
auto* peer = Thread::current(); auto* peer = Thread::current();
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
if (pid != 0) if (pid != 0)
peer = Thread::from_tid(pid); peer = Thread::from_tid(pid);
@ -49,7 +49,7 @@ KResultOr<FlatPtr> Process::sys$sched_getparam(pid_t pid, Userspace<struct sched
int priority; int priority;
{ {
auto* peer = Thread::current(); auto* peer = Thread::current();
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
if (pid != 0) { if (pid != 0) {
// FIXME: PID/TID BUG // FIXME: PID/TID BUG
// The entire process is supposed to be affected. // The entire process is supposed to be affected.
View file
@ -77,7 +77,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
PerformanceManager::add_thread_created_event(*thread); PerformanceManager::add_thread_created_event(*thread);
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
thread->set_priority(requested_thread_priority); thread->set_priority(requested_thread_priority);
thread->set_state(Thread::State::Runnable); thread->set_state(Thread::State::Runnable);
return thread->tid().value(); return thread->tid().value();
@ -207,7 +207,7 @@ KResultOr<FlatPtr> Process::sys$get_thread_name(pid_t tid, Userspace<char*> buff
if (!thread || thread->pid() != pid()) if (!thread || thread->pid() != pid())
return ESRCH; return ESRCH;
ScopedSpinlock locker(thread->get_lock()); SpinlockLocker locker(thread->get_lock());
auto thread_name = thread->name(); auto thread_name = thread->name();
if (thread_name.is_null()) { if (thread_name.is_null()) {
View file
@ -59,7 +59,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize()
PANIC("Switch to tty value is invalid: {} ", tty_number); PANIC("Switch to tty value is invalid: {} ", tty_number);
} }
m_active_console = &m_consoles[tty_number]; m_active_console = &m_consoles[tty_number];
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_active_console->set_active(true); m_active_console->set_active(true);
if (!m_active_console->is_graphical()) if (!m_active_console->is_graphical())
m_active_console->clear(); m_active_console->clear();
@ -67,7 +67,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize()
void ConsoleManagement::switch_to(unsigned index) void ConsoleManagement::switch_to(unsigned index)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(m_active_console); VERIFY(m_active_console);
VERIFY(index < m_consoles.size()); VERIFY(index < m_consoles.size());
if (m_active_console->index() == index) if (m_active_console->index() == index)
View file
@ -259,7 +259,7 @@ void VirtualConsole::on_key_pressed(KeyEvent event)
KResultOr<size_t> VirtualConsole::on_tty_write(const UserOrKernelBuffer& data, size_t size) KResultOr<size_t> VirtualConsole::on_tty_write(const UserOrKernelBuffer& data, size_t size)
{ {
ScopedSpinlock global_lock(ConsoleManagement::the().tty_write_lock()); SpinlockLocker global_lock(ConsoleManagement::the().tty_write_lock());
auto result = data.read_buffered<512>(size, [&](u8 const* buffer, size_t buffer_bytes) { auto result = data.read_buffered<512>(size, [&](u8 const* buffer, size_t buffer_bytes) {
for (size_t i = 0; i < buffer_bytes; ++i) for (size_t i = 0; i < buffer_bytes; ++i)
m_console_impl.on_input(buffer[i]); m_console_impl.on_input(buffer[i]);
View file
@ -147,7 +147,7 @@ Thread::~Thread()
// Specifically, if this is the last thread of a process, checking // Specifically, if this is the last thread of a process, checking
// block conditions would access m_process, which would be in // block conditions would access m_process, which would be in
// the middle of being destroyed. // the middle of being destroyed.
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
VERIFY(!m_process_thread_list_node.is_in_list()); VERIFY(!m_process_thread_list_node.is_in_list());
// We shouldn't be queued // We shouldn't be queued
@ -155,16 +155,16 @@ Thread::~Thread()
} }
} }
void Thread::block(Kernel::Mutex& lock, ScopedSpinlock<Spinlock<u8>>& lock_lock, u32 lock_count) void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<u8>>& lock_lock, u32 lock_count)
{ {
VERIFY(!Processor::current().in_irq()); VERIFY(!Processor::current().in_irq());
VERIFY(this == Thread::current()); VERIFY(this == Thread::current());
ScopedCritical critical; ScopedCritical critical;
VERIFY(!Memory::s_mm_lock.own_lock()); VERIFY(!Memory::s_mm_lock.own_lock());
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
switch (state()) { switch (state()) {
case Thread::Stopped: case Thread::Stopped:
@ -212,7 +212,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinlock<Spinlock<u8>>& lock_lock,
} }
VERIFY(Processor::in_critical()); VERIFY(Processor::in_critical());
ScopedSpinlock block_lock2(m_block_lock); SpinlockLocker block_lock2(m_block_lock);
if (should_be_stopped() || state() == Stopped) { if (should_be_stopped() || state() == Stopped) {
dbgln("Thread should be stopped, current state: {}", state_string()); dbgln("Thread should be stopped, current state: {}", state_string());
set_state(Thread::Blocked); set_state(Thread::Blocked);
@ -229,14 +229,14 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinlock<Spinlock<u8>>& lock_lock,
u32 Thread::unblock_from_lock(Kernel::Mutex& lock) u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
{ {
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock); VERIFY(m_blocking_lock == &lock);
auto requested_count = m_lock_requested_count; auto requested_count = m_lock_requested_count;
block_lock.unlock(); block_lock.unlock();
auto do_unblock = [&]() { auto do_unblock = [&]() {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock); VERIFY(m_blocking_lock == &lock);
VERIFY(!Processor::current().in_irq()); VERIFY(!Processor::current().in_irq());
VERIFY(g_scheduler_lock.own_lock()); VERIFY(g_scheduler_lock.own_lock());
@ -265,8 +265,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
void Thread::unblock_from_blocker(Blocker& blocker) void Thread::unblock_from_blocker(Blocker& blocker)
{ {
auto do_unblock = [&]() { auto do_unblock = [&]() {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocker != &blocker) if (m_blocker != &blocker)
return; return;
if (!should_be_stopped() && !is_stopped()) if (!should_be_stopped() && !is_stopped())
@ -322,7 +322,7 @@ void Thread::set_should_die()
// Remember that we should die instead of returning to // Remember that we should die instead of returning to
// the userspace. // the userspace.
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
m_should_die = true; m_should_die = true;
// NOTE: Even the current thread can technically be in "Stopped" // NOTE: Even the current thread can technically be in "Stopped"
@ -337,7 +337,7 @@ void Thread::set_should_die()
resume_from_stopped(); resume_from_stopped();
} }
if (is_blocked()) { if (is_blocked()) {
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocker) { if (m_blocker) {
// We're blocked in the kernel. // We're blocked in the kernel.
m_blocker->set_interrupted_by_death(); m_blocker->set_interrupted_by_death();
@ -359,7 +359,7 @@ void Thread::die_if_needed()
dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this); dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this);
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
// It's possible that we don't reach the code after this block if the // It's possible that we don't reach the code after this block if the
// scheduler is invoked and FinalizerTask cleans up this thread, however // scheduler is invoked and FinalizerTask cleans up this thread, however
// that doesn't matter because we're trying to invoke the scheduler anyway // that doesn't matter because we're trying to invoke the scheduler anyway
@ -478,7 +478,7 @@ StringView Thread::state_string() const
case Thread::Stopped: case Thread::Stopped:
return "Stopped"sv; return "Stopped"sv;
case Thread::Blocked: { case Thread::Blocked: {
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocking_lock) if (m_blocking_lock)
return "Mutex"sv; return "Mutex"sv;
if (m_blocker) if (m_blocker)
@ -498,7 +498,7 @@ void Thread::finalize()
VERIFY(!m_lock.own_lock()); VERIFY(!m_lock.own_lock());
if (lock_count() > 0) { if (lock_count() > 0) {
dbgln("Thread {} leaking {} Locks!", *this, lock_count()); dbgln("Thread {} leaking {} Locks!", *this, lock_count());
ScopedSpinlock list_lock(m_holding_locks_lock); SpinlockLocker list_lock(m_holding_locks_lock);
for (auto& info : m_holding_locks_list) { for (auto& info : m_holding_locks_list) {
const auto& location = info.lock_location; const auto& location = info.lock_location;
dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count); dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
@ -508,7 +508,7 @@ void Thread::finalize()
#endif #endif
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this); dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
set_state(Thread::State::Dead); set_state(Thread::State::Dead);
m_join_condition.thread_finalizing(); m_join_condition.thread_finalizing();
@ -533,7 +533,7 @@ void Thread::finalize_dying_threads()
VERIFY(Thread::current() == g_finalizer); VERIFY(Thread::current() == g_finalizer);
Vector<Thread*, 32> dying_threads; Vector<Thread*, 32> dying_threads;
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
for_each_in_state(Thread::State::Dying, [&](Thread& thread) { for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
if (thread.is_finalizable()) if (thread.is_finalizable())
dying_threads.append(&thread); dying_threads.append(&thread);
@ -566,7 +566,7 @@ void Thread::update_time_scheduled(u64 current_scheduler_time, bool is_kernel, b
Scheduler::add_time_scheduled(delta, is_kernel); Scheduler::add_time_scheduled(delta, is_kernel);
auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user; auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user;
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
total_time += delta; total_time += delta;
} }
} }
@ -592,9 +592,9 @@ void Thread::check_dispatch_pending_signal()
{ {
auto result = DispatchSignalResult::Continue; auto result = DispatchSignalResult::Continue;
{ {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
if (pending_signals_for_state()) { if (pending_signals_for_state()) {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
result = dispatch_one_pending_signal(); result = dispatch_one_pending_signal();
} }
} }
@ -610,7 +610,7 @@ void Thread::check_dispatch_pending_signal()
u32 Thread::pending_signals() const u32 Thread::pending_signals() const
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
return pending_signals_for_state(); return pending_signals_for_state();
} }
@ -626,7 +626,7 @@ u32 Thread::pending_signals_for_state() const
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender) void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{ {
VERIFY(signal < 32); VERIFY(signal < 32);
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
// FIXME: Figure out what to do for masked signals. Should we also ignore them here? // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
if (should_ignore_signal(signal)) { if (should_ignore_signal(signal)) {
@ -645,13 +645,13 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release); m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
if (m_state == Stopped) { if (m_state == Stopped) {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (pending_signals_for_state()) { if (pending_signals_for_state()) {
dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal); dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
resume_from_stopped(); resume_from_stopped();
} }
} else { } else {
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal); dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal);
unblock(signal); unblock(signal);
} }
@ -659,7 +659,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
u32 Thread::update_signal_mask(u32 signal_mask) u32 Thread::update_signal_mask(u32 signal_mask)
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
auto previous_signal_mask = m_signal_mask; auto previous_signal_mask = m_signal_mask;
m_signal_mask = signal_mask; m_signal_mask = signal_mask;
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release); m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
@ -668,13 +668,13 @@ u32 Thread::update_signal_mask(u32 signal_mask)
u32 Thread::signal_mask() const u32 Thread::signal_mask() const
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
return m_signal_mask; return m_signal_mask;
} }
u32 Thread::signal_mask_block(sigset_t signal_set, bool block) u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
auto previous_signal_mask = m_signal_mask; auto previous_signal_mask = m_signal_mask;
if (block) if (block)
m_signal_mask &= ~signal_set; m_signal_mask &= ~signal_set;
@ -686,7 +686,7 @@ u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
void Thread::clear_signals() void Thread::clear_signals()
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
m_signal_mask = 0; m_signal_mask = 0;
m_pending_signals = 0; m_pending_signals = 0;
m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release); m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
@ -704,7 +704,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
VERIFY(Thread::current() == this); VERIFY(Thread::current() == this);
DispatchSignalResult result; DispatchSignalResult result;
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
result = dispatch_signal(signal); result = dispatch_signal(signal);
} }
if (result == DispatchSignalResult::Yield) if (result == DispatchSignalResult::Yield)
@ -730,8 +730,8 @@ DispatchSignalResult Thread::dispatch_one_pending_signal()
DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal) DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{ {
VERIFY(signal != 0); VERIFY(signal != 0);
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask; u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
if (!(signal_candidates & (1 << (signal - 1)))) if (!(signal_candidates & (1 << (signal - 1))))
return DispatchSignalResult::Continue; return DispatchSignalResult::Continue;
@ -821,7 +821,7 @@ void Thread::resume_from_stopped()
VERIFY(m_stop_state != State::Invalid); VERIFY(m_stop_state != State::Invalid);
VERIFY(g_scheduler_lock.own_lock()); VERIFY(g_scheduler_lock.own_lock());
if (m_stop_state == Blocked) { if (m_stop_state == Blocked) {
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocker || m_blocking_lock) { if (m_blocker || m_blocking_lock) {
// Hasn't been unblocked yet // Hasn't been unblocked yet
set_state(Blocked, 0); set_state(Blocked, 0);
@ -1055,7 +1055,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
return; return;
{ {
ScopedSpinlock thread_lock(m_lock); SpinlockLocker thread_lock(m_lock);
previous_state = m_state; previous_state = m_state;
if (previous_state == Invalid) { if (previous_state == Invalid) {
// If we were *just* created, we may have already pending signals // If we were *just* created, we may have already pending signals
View file
@ -177,13 +177,13 @@ public:
void detach() void detach()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_is_joinable = false; m_is_joinable = false;
} }
[[nodiscard]] bool is_joinable() const [[nodiscard]] bool is_joinable() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
return m_is_joinable; return m_is_joinable;
} }
@ -200,7 +200,7 @@ public:
void set_name(OwnPtr<KString> name) void set_name(OwnPtr<KString> name)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_name = move(name); m_name = move(name);
} }
@ -309,28 +309,28 @@ public:
virtual void was_unblocked(bool did_timeout) virtual void was_unblocked(bool did_timeout)
{ {
if (did_timeout) { if (did_timeout) {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_did_timeout = true; m_did_timeout = true;
} }
} }
void set_interrupted_by_death() void set_interrupted_by_death()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
do_set_interrupted_by_death(); do_set_interrupted_by_death();
} }
void set_interrupted_by_signal(u8 signal) void set_interrupted_by_signal(u8 signal)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
do_set_interrupted_by_signal(signal); do_set_interrupted_by_signal(signal);
} }
u8 was_interrupted_by_signal() const u8 was_interrupted_by_signal() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
return do_get_interrupted_by_signal(); return do_get_interrupted_by_signal();
} }
virtual Thread::BlockResult block_result() virtual Thread::BlockResult block_result()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_was_interrupted_by_death) if (m_was_interrupted_by_death)
return Thread::BlockResult::InterruptedByDeath; return Thread::BlockResult::InterruptedByDeath;
if (m_was_interrupted_by_signal != 0) if (m_was_interrupted_by_signal != 0)
@ -370,7 +370,7 @@ public:
RefPtr<Thread> thread; RefPtr<Thread> thread;
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_is_blocking) { if (m_is_blocking) {
m_is_blocking = false; m_is_blocking = false;
VERIFY(m_blocked_thread); VERIFY(m_blocked_thread);
@ -409,13 +409,13 @@ public:
virtual ~BlockCondition() virtual ~BlockCondition()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(m_blockers.is_empty()); VERIFY(m_blockers.is_empty());
} }
bool add_blocker(Blocker& blocker, void* data) bool add_blocker(Blocker& blocker, void* data)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!should_add_blocker(blocker, data)) if (!should_add_blocker(blocker, data))
return false; return false;
m_blockers.append({ &blocker, data }); m_blockers.append({ &blocker, data });
@ -424,7 +424,7 @@ public:
void remove_blocker(Blocker& blocker, void* data) void remove_blocker(Blocker& blocker, void* data)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// NOTE: it's possible that the blocker is no longer present // NOTE: it's possible that the blocker is no longer present
m_blockers.remove_first_matching([&](auto& info) { m_blockers.remove_first_matching([&](auto& info) {
return info.blocker == &blocker && info.data == data; return info.blocker == &blocker && info.data == data;
@ -433,7 +433,7 @@ public:
bool is_empty() const bool is_empty() const
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
return is_empty_locked(); return is_empty_locked();
} }
@ -441,7 +441,7 @@ public:
template<typename UnblockOne> template<typename UnblockOne>
bool unblock(UnblockOne unblock_one) bool unblock(UnblockOne unblock_one)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
return do_unblock(unblock_one); return do_unblock(unblock_one);
} }
@ -785,7 +785,7 @@ public:
if (Thread::current() == this) if (Thread::current() == this)
return EDEADLK; return EDEADLK;
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!m_is_joinable || state() == Dead) if (!m_is_joinable || state() == Dead)
return EINVAL; return EINVAL;
@ -808,7 +808,7 @@ public:
[[nodiscard]] bool is_blocked() const { return m_state == Blocked; } [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
[[nodiscard]] bool is_in_block() const [[nodiscard]] bool is_in_block() const
{ {
ScopedSpinlock lock(m_block_lock); SpinlockLocker lock(m_block_lock);
return m_in_block; return m_in_block;
} }
@ -841,7 +841,7 @@ public:
// tick or entering the next system call, or if it's in kernel // tick or entering the next system call, or if it's in kernel
// mode then we will intercept prior to returning back to user // mode then we will intercept prior to returning back to user
// mode. // mode.
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
while (state() == Thread::Stopped) { while (state() == Thread::Stopped) {
lock.unlock(); lock.unlock();
// We shouldn't be holding the big lock here // We shouldn't be holding the big lock here
@ -850,7 +850,7 @@ public:
} }
} }
void block(Kernel::Mutex&, ScopedSpinlock<Spinlock<u8>>&, u32); void block(Kernel::Mutex&, SpinlockLocker<Spinlock<u8>>&, u32);
template<typename BlockerType, class... Args> template<typename BlockerType, class... Args>
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args) [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
@ -860,13 +860,13 @@ public:
ScopedCritical critical; ScopedCritical critical;
VERIFY(!Memory::s_mm_lock.own_lock()); VERIFY(!Memory::s_mm_lock.own_lock());
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
// We need to hold m_block_lock so that nobody can unblock a blocker as soon // We need to hold m_block_lock so that nobody can unblock a blocker as soon
// as it is constructed and registered elsewhere // as it is constructed and registered elsewhere
m_in_block = true; m_in_block = true;
BlockerType blocker(forward<Args>(args)...); BlockerType blocker(forward<Args>(args)...);
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
// Relaxed semantics are fine for timeout_unblocked because we // Relaxed semantics are fine for timeout_unblocked because we
// synchronize on the spin locks already. // synchronize on the spin locks already.
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false); Atomic<bool, AK::MemoryOrder::memory_order_relaxed> timeout_unblocked(false);
@ -901,8 +901,8 @@ public:
VERIFY(!g_scheduler_lock.own_lock()); VERIFY(!g_scheduler_lock.own_lock());
VERIFY(!m_block_lock.own_lock()); VERIFY(!m_block_lock.own_lock());
// NOTE: this may execute on the same or any other processor! // NOTE: this may execute on the same or any other processor!
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
ScopedSpinlock block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocker && timeout_unblocked.exchange(true) == false) if (m_blocker && timeout_unblocked.exchange(true) == false)
unblock(); unblock();
}); });
@ -934,7 +934,7 @@ public:
yield_without_releasing_big_lock(); yield_without_releasing_big_lock();
VERIFY(Processor::in_critical()); VERIFY(Processor::in_critical());
ScopedSpinlock block_lock2(m_block_lock); SpinlockLocker block_lock2(m_block_lock);
if (should_be_stopped() || state() == Stopped) { if (should_be_stopped() || state() == Stopped) {
dbgln("Thread should be stopped, current state: {}", state_string()); dbgln("Thread should be stopped, current state: {}", state_string());
set_state(Thread::Blocked); set_state(Thread::Blocked);
@ -960,8 +960,8 @@ public:
} }
if (blocker.was_interrupted_by_signal()) { if (blocker.was_interrupted_by_signal()) {
ScopedSpinlock scheduler_lock(g_scheduler_lock); SpinlockLocker scheduler_lock(g_scheduler_lock);
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
dispatch_one_pending_signal(); dispatch_one_pending_signal();
} }
@ -1120,7 +1120,7 @@ public:
// We can't finalize until the thread is either detached or // We can't finalize until the thread is either detached or
// a join has started. We can't make m_is_joinable atomic // a join has started. We can't make m_is_joinable atomic
// because that would introduce a race in try_join. // because that would introduce a race in try_join.
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
return !m_is_joinable; return !m_is_joinable;
} }
@ -1165,7 +1165,7 @@ public:
{ {
VERIFY(refs_delta != 0); VERIFY(refs_delta != 0);
m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed); m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
ScopedSpinlock list_lock(m_holding_locks_lock); SpinlockLocker list_lock(m_holding_locks_lock);
if (refs_delta > 0) { if (refs_delta > 0) {
bool have_existing = false; bool have_existing = false;
for (size_t i = 0; i < m_holding_locks_list.size(); i++) { for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
@ -1236,7 +1236,7 @@ private:
public: public:
void thread_did_exit(void* exit_value) void thread_did_exit(void* exit_value)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(!m_thread_did_exit); VERIFY(!m_thread_did_exit);
m_thread_did_exit = true; m_thread_did_exit = true;
m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release); m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
@ -1244,7 +1244,7 @@ private:
} }
void thread_finalizing() void thread_finalizing()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
do_unblock_joiner(); do_unblock_joiner();
} }
void* exit_value() const void* exit_value() const
@ -1255,7 +1255,7 @@ private:
void try_unblock(JoinBlocker& blocker) void try_unblock(JoinBlocker& blocker)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_thread_did_exit) if (m_thread_did_exit)
blocker.unblock(exit_value(), false); blocker.unblock(exit_value(), false);
} }
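For context, the renamed type is the kernel's RAII guard for Spinlock: the constructor acquires the lock, the destructor releases it, and an explicit unlock() allows early release, as in Thread::block() and the stopped-state loop above. Below is a minimal sketch of that contract using simplified stand-in types, not the kernel's actual templates; the real Spinlock/SpinlockLocker also manage interrupt and critical-section state, which is omitted here.

#include <atomic>

// Simplified stand-ins for Kernel::Spinlock / Kernel::SpinlockLocker.
class Spinlock {
public:
    void lock()
    {
        while (m_flag.test_and_set(std::memory_order_acquire)) {
            // Spin until the current holder calls unlock().
        }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

class SpinlockLocker {
public:
    explicit SpinlockLocker(Spinlock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~SpinlockLocker()
    {
        if (m_locked)
            m_lock.unlock();
    }
    // Early release, mirroring the explicit lock.unlock() calls above.
    void unlock()
    {
        m_lock.unlock();
        m_locked = false;
    }

private:
    Spinlock& m_lock;
    bool m_locked { true };
};

Spinlock g_example_lock;
int g_protected_value;

void set_protected_value(int value)
{
    SpinlockLocker locker(g_example_lock); // acquires in the constructor
    g_protected_value = value;
}                                          // releases in the destructor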
View file
@ -41,14 +41,14 @@ bool Thread::Blocker::set_block_condition(Thread::BlockCondition& block_conditio
Thread::Blocker::~Blocker() Thread::Blocker::~Blocker()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_block_condition) if (m_block_condition)
m_block_condition->remove_blocker(*this, m_block_data); m_block_condition->remove_blocker(*this, m_block_data);
} }
void Thread::Blocker::begin_blocking(Badge<Thread>) void Thread::Blocker::begin_blocking(Badge<Thread>)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(!m_is_blocking); VERIFY(!m_is_blocking);
VERIFY(!m_blocked_thread); VERIFY(!m_blocked_thread);
m_blocked_thread = Thread::current(); m_blocked_thread = Thread::current();
@ -57,7 +57,7 @@ void Thread::Blocker::begin_blocking(Badge<Thread>)
auto Thread::Blocker::end_blocking(Badge<Thread>, bool did_timeout) -> BlockResult auto Thread::Blocker::end_blocking(Badge<Thread>, bool did_timeout) -> BlockResult
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// if m_is_blocking is false here, some thread forced to // if m_is_blocking is false here, some thread forced to
// unblock us when we get here. This is only called from the // unblock us when we get here. This is only called from the
// thread that was blocked. // thread that was blocked.
@ -76,7 +76,7 @@ Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void*
{ {
// We need to hold our lock to avoid a race where try_join succeeds // We need to hold our lock to avoid a race where try_join succeeds
// but the joinee is joining immediately // but the joinee is joining immediately
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
try_join_result = joinee.try_join([&]() { try_join_result = joinee.try_join([&]() {
if (!set_block_condition(joinee.m_join_condition)) if (!set_block_condition(joinee.m_join_condition))
m_should_block = false; m_should_block = false;
@ -105,7 +105,7 @@ void Thread::JoinBlocker::not_blocking(bool timeout_in_past)
bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker) bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker)
{ {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
m_did_unblock = true; m_did_unblock = true;
@ -132,7 +132,7 @@ Thread::QueueBlocker::~QueueBlocker()
bool Thread::QueueBlocker::unblock() bool Thread::QueueBlocker::unblock()
{ {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
m_did_unblock = true; m_did_unblock = true;
@ -164,7 +164,7 @@ void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
bool Thread::FutexBlocker::unblock_bitset(u32 bitset) bool Thread::FutexBlocker::unblock_bitset(u32 bitset)
{ {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock || (bitset != FUTEX_BITSET_MATCH_ANY && (m_bitset & bitset) == 0)) if (m_did_unblock || (bitset != FUTEX_BITSET_MATCH_ANY && (m_bitset & bitset) == 0))
return false; return false;
@ -178,7 +178,7 @@ bool Thread::FutexBlocker::unblock_bitset(u32 bitset)
bool Thread::FutexBlocker::unblock(bool force) bool Thread::FutexBlocker::unblock(bool force)
{ {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return force; return force;
m_did_unblock = true; m_did_unblock = true;
@ -205,7 +205,7 @@ bool Thread::FileDescriptionBlocker::unblock(bool from_add_blocker, void*)
return false; return false;
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
m_did_unblock = true; m_did_unblock = true;
@ -364,7 +364,7 @@ void Thread::SelectBlocker::not_blocking(bool timeout_in_past)
{ {
// Either the timeout was in the past or we didn't add all blockers // Either the timeout was in the past or we didn't add all blockers
VERIFY(timeout_in_past || !m_should_block); VERIFY(timeout_in_past || !m_should_block);
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (!m_should_block || !m_did_unblock) { if (!m_should_block || !m_did_unblock) {
m_did_unblock = true; m_did_unblock = true;
if (!timeout_in_past) { if (!timeout_in_past) {
@ -380,7 +380,7 @@ bool Thread::SelectBlocker::unblock(bool from_add_blocker, void* data)
auto& fd_info = *static_cast<FDInfo*>(data); auto& fd_info = *static_cast<FDInfo*>(data);
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
@ -425,7 +425,7 @@ void Thread::SelectBlocker::was_unblocked(bool did_timeout)
Blocker::was_unblocked(did_timeout); Blocker::was_unblocked(did_timeout);
if (!did_timeout && !was_interrupted()) { if (!did_timeout && !was_interrupted()) {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(m_did_unblock); VERIFY(m_did_unblock);
} }
size_t count = collect_unblocked_flags(); size_t count = collect_unblocked_flags();
@ -447,7 +447,7 @@ Thread::WaitBlockCondition::ProcessBlockInfo::~ProcessBlockInfo()
void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker) void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// We if we have any processes pending // We if we have any processes pending
for (size_t i = 0; i < m_processes.size(); i++) { for (size_t i = 0; i < m_processes.size(); i++) {
auto& info = m_processes[i]; auto& info = m_processes[i];
@ -472,7 +472,7 @@ void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker)
void Thread::WaitBlockCondition::disowned_by_waiter(Process& process) void Thread::WaitBlockCondition::disowned_by_waiter(Process& process)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_finalized) if (m_finalized)
return; return;
for (size_t i = 0; i < m_processes.size();) { for (size_t i = 0; i < m_processes.size();) {
@ -502,7 +502,7 @@ bool Thread::WaitBlockCondition::unblock(Process& process, WaitBlocker::UnblockF
bool did_wait = false; bool did_wait = false;
bool was_waited_already = false; bool was_waited_already = false;
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_finalized) if (m_finalized)
return false; return false;
if (flags != WaitBlocker::UnblockFlags::Terminated) { if (flags != WaitBlocker::UnblockFlags::Terminated) {
@ -573,7 +573,7 @@ bool Thread::WaitBlockCondition::should_add_blocker(Blocker& b, void*)
void Thread::WaitBlockCondition::finalize() void Thread::WaitBlockCondition::finalize()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
VERIFY(!m_finalized); VERIFY(!m_finalized);
m_finalized = true; m_finalized = true;
@ -637,7 +637,7 @@ void Thread::WaitBlocker::was_unblocked(bool)
{ {
bool got_sigchld, try_unblock; bool got_sigchld, try_unblock;
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
try_unblock = !m_did_unblock; try_unblock = !m_did_unblock;
got_sigchld = m_got_sigchild; got_sigchld = m_got_sigchild;
} }
@ -720,7 +720,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa
return false; return false;
break; break;
case UnblockFlags::Disowned: case UnblockFlags::Disowned:
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
// Disowning must unblock anyone waiting for this process explicitly // Disowning must unblock anyone waiting for this process explicitly
if (!m_did_unblock) if (!m_did_unblock)
do_was_disowned(); do_was_disowned();
@ -730,7 +730,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa
if (flags == UnblockFlags::Terminated) { if (flags == UnblockFlags::Terminated) {
VERIFY(process.is_dead()); VERIFY(process.is_dead());
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
// Up until this point, this function may have been called // Up until this point, this function may have been called
@ -739,7 +739,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa
} else { } else {
siginfo_t siginfo {}; siginfo_t siginfo {};
{ {
ScopedSpinlock lock(g_scheduler_lock); SpinlockLocker lock(g_scheduler_lock);
// We need to gather the information before we release the scheduler lock! // We need to gather the information before we release the scheduler lock!
siginfo.si_signo = SIGCHLD; siginfo.si_signo = SIGCHLD;
siginfo.si_pid = process.pid().value(); siginfo.si_pid = process.pid().value();
@ -759,7 +759,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa
} }
} }
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
if (m_did_unblock) if (m_did_unblock)
return false; return false;
// Up until this point, this function may have been called // Up until this point, this function may have been called
View file
@ -67,7 +67,7 @@ bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t cloc
// returning from the timer handler and a call to cancel_timer(). // returning from the timer handler and a call to cancel_timer().
timer->setup(clock_id, deadline, move(callback)); timer->setup(clock_id, deadline, move(callback));
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
timer->m_id = 0; // Don't generate a timer id timer->m_id = 0; // Don't generate a timer id
add_timer_locked(move(timer)); add_timer_locked(move(timer));
return true; return true;
@ -75,7 +75,7 @@ bool TimerQueue::add_timer_without_id(NonnullRefPtr<Timer> timer, clockid_t cloc
TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer) TimerId TimerQueue::add_timer(NonnullRefPtr<Timer>&& timer)
{ {
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
timer->m_id = ++m_timer_id_count; timer->m_id = ++m_timer_id_count;
VERIFY(timer->m_id != 0); // wrapped VERIFY(timer->m_id != 0); // wrapped
@ -130,7 +130,7 @@ bool TimerQueue::cancel_timer(TimerId id)
Timer* found_timer = nullptr; Timer* found_timer = nullptr;
Queue* timer_queue = nullptr; Queue* timer_queue = nullptr;
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
for (auto& timer : m_timer_queue_monotonic.list) { for (auto& timer : m_timer_queue_monotonic.list) {
if (timer.m_id == id) { if (timer.m_id == id) {
found_timer = &timer; found_timer = &timer;
@ -207,7 +207,7 @@ bool TimerQueue::cancel_timer(Timer& timer, bool* was_in_use)
if (!did_already_run) { if (!did_already_run) {
timer.clear_in_use(); timer.clear_in_use();
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
if (timer_queue.list.contains(timer)) { if (timer_queue.list.contains(timer)) {
// The timer has not fired, remove it // The timer has not fired, remove it
VERIFY(timer.ref_count() > 1); VERIFY(timer.ref_count() > 1);
@ -251,7 +251,7 @@ void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer)
void TimerQueue::fire() void TimerQueue::fire()
{ {
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
auto fire_timers = [&](Queue& queue) { auto fire_timers = [&](Queue& queue) {
auto* timer = queue.list.first(); auto* timer = queue.list.first();
@ -274,7 +274,7 @@ void TimerQueue::fire()
// our reference and don't execute the callback. // our reference and don't execute the callback.
if (!timer->set_cancelled()) { if (!timer->set_cancelled()) {
timer->m_callback(); timer->m_callback();
ScopedSpinlock lock(g_timerqueue_lock); SpinlockLocker lock(g_timerqueue_lock);
m_timers_executing.remove(*timer); m_timers_executing.remove(*timer);
} }
timer->clear_in_use(); timer->clear_in_use();
View file
@ -27,7 +27,7 @@ bool WaitQueue::should_add_blocker(Thread::Blocker& b, void* data)
u32 WaitQueue::wake_one() u32 WaitQueue::wake_one()
{ {
u32 did_wake = 0; u32 did_wake = 0;
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one", this); dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one", this);
bool did_unblock_one = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) { bool did_unblock_one = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
VERIFY(data); VERIFY(data);
@ -50,7 +50,7 @@ u32 WaitQueue::wake_n(u32 wake_count)
{ {
if (wake_count == 0) if (wake_count == 0)
return 0; // should we assert instead? return 0; // should we assert instead?
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({})", this, wake_count); dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({})", this, wake_count);
u32 did_wake = 0; u32 did_wake = 0;
@ -74,7 +74,7 @@ u32 WaitQueue::wake_n(u32 wake_count)
u32 WaitQueue::wake_all() u32 WaitQueue::wake_all()
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all", this); dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all", this);
u32 did_wake = 0; u32 did_wake = 0;
View file
@ -20,7 +20,7 @@ public:
void should_block(bool block) void should_block(bool block)
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_should_block = block; m_should_block = block;
} }
View file
@ -27,7 +27,7 @@ UNMAP_AFTER_INIT WorkQueue::WorkQueue(const char* name)
WorkItem* item; WorkItem* item;
bool have_more; bool have_more;
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
item = m_items.take_first(); item = m_items.take_first();
have_more = !m_items.is_empty(); have_more = !m_items.is_empty();
} }
@ -48,7 +48,7 @@ UNMAP_AFTER_INIT WorkQueue::WorkQueue(const char* name)
void WorkQueue::do_queue(WorkItem* item) void WorkQueue::do_queue(WorkItem* item)
{ {
{ {
ScopedSpinlock lock(m_lock); SpinlockLocker lock(m_lock);
m_items.append(*item); m_items.append(*item);
} }
m_wait_queue.wake_one(); m_wait_queue.wake_one();
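The WorkQueue hunk above keeps a pattern that appears throughout this change: mutate shared state inside a short SpinlockLocker scope, then wake the consumer only after the locker has been destroyed, so the woken thread does not immediately contend on the lock. A rough userspace analogue of that shape follows; std::mutex and std::condition_variable stand in for the kernel primitives, and all names are illustrative only.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

class SimpleWorkQueue {
public:
    void queue(std::function<void()> item)
    {
        {
            std::scoped_lock lock(m_lock); // plays the role of SpinlockLocker lock(m_lock)
            m_items.push_back(std::move(item));
        } // lock released here, before waking the worker, as in do_queue() above
        m_wake.notify_one();
    }

    void run_one()
    {
        std::function<void()> item;
        {
            std::unique_lock lock(m_lock);
            m_wake.wait(lock, [&] { return !m_items.empty(); });
            item = std::move(m_items.front());
            m_items.pop_front();
        } // drop the lock before running the (possibly slow) work item
        item();
    }

private:
    std::mutex m_lock;
    std::condition_variable m_wake;
    std::deque<std::function<void()>> m_items;
};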
View file
@ -153,7 +153,7 @@ static inline void internal_dbgputch(char ch)
extern "C" void dbgputch(char ch) extern "C" void dbgputch(char ch)
{ {
ScopedSpinlock lock(s_log_lock); SpinlockLocker lock(s_log_lock);
internal_dbgputch(ch); internal_dbgputch(ch);
} }
@ -161,7 +161,7 @@ extern "C" void dbgputstr(const char* characters, size_t length)
{ {
if (!characters) if (!characters)
return; return;
ScopedSpinlock lock(s_log_lock); SpinlockLocker lock(s_log_lock);
for (size_t i = 0; i < length; ++i) for (size_t i = 0; i < length; ++i)
internal_dbgputch(characters[i]); internal_dbgputch(characters[i]);
} }
@ -175,7 +175,7 @@ extern "C" void kernelputstr(const char* characters, size_t length)
{ {
if (!characters) if (!characters)
return; return;
ScopedSpinlock lock(s_log_lock); SpinlockLocker lock(s_log_lock);
for (size_t i = 0; i < length; ++i) for (size_t i = 0; i < length; ++i)
console_out(characters[i]); console_out(characters[i]);
} }
@ -184,7 +184,7 @@ extern "C" void kernelcriticalputstr(const char* characters, size_t length)
{ {
if (!characters) if (!characters)
return; return;
ScopedSpinlock lock(s_log_lock); SpinlockLocker lock(s_log_lock);
for (size_t i = 0; i < length; ++i) for (size_t i = 0; i < length; ++i)
critical_console_out(characters[i]); critical_console_out(characters[i]);
} }