diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp index 0a8dce6fb4..4b9cf2c1ce 100644 --- a/Kernel/Arch/x86/common/Processor.cpp +++ b/Kernel/Arch/x86/common/Processor.cpp @@ -501,7 +501,7 @@ Vector Processor::capture_stack_trace(Thread& thread, size_t max_frames // is a chance a context switch may happen while we're trying // to get it. It also won't be entirely accurate and merely // reflect the status at the last context switch. - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); if (&thread == Processor::current_thread()) { VERIFY(thread.state() == Thread::Running); // Leave the scheduler lock. If we trigger page faults we may diff --git a/Kernel/Bus/PCI/MMIOAccess.cpp b/Kernel/Bus/PCI/MMIOAccess.cpp index 077c5703fa..f1c57ea827 100644 --- a/Kernel/Bus/PCI/MMIOAccess.cpp +++ b/Kernel/Bus/PCI/MMIOAccess.cpp @@ -117,7 +117,7 @@ VirtualAddress MMIOAccess::get_device_configuration_space(Address address) u8 MMIOAccess::read8_field(Address address, u32 field) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field <= 0xfff); dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 8-bit field {:#08x} for {}", field, address); return *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff))); @@ -125,7 +125,7 @@ u8 MMIOAccess::read8_field(Address address, u32 field) u16 MMIOAccess::read16_field(Address address, u32 field) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field < 0xfff); dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 16-bit field {:#08x} for {}", field, address); u16 data = 0; @@ -135,7 +135,7 @@ u16 MMIOAccess::read16_field(Address address, u32 field) u32 MMIOAccess::read32_field(Address address, u32 field) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field <= 0xffc); dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 32-bit field {:#08x} for {}", field, address); u32 data = 0; @@ -145,21 +145,21 @@ u32 MMIOAccess::read32_field(Address address, u32 field) void MMIOAccess::write8_field(Address address, u32 field, u8 value) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field <= 0xfff); dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 8-bit field {:#08x}, value={:#02x} for {}", field, value, address); *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff))) = value; } void MMIOAccess::write16_field(Address address, u32 field, u16 value) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field < 0xfff); dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 16-bit field {:#08x}, value={:#02x} for {}", field, value, address); ByteReader::store(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value); } void MMIOAccess::write32_field(Address address, u32 field, u32 value) { - ScopedSpinlock lock(m_access_lock); + SpinlockLocker lock(m_access_lock); VERIFY(field <= 0xffc); dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 32-bit field {:#08x}, value={:#02x} for {}", field, value, address); ByteReader::store(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value); diff --git a/Kernel/Bus/USB/SysFSUSB.cpp b/Kernel/Bus/USB/SysFSUSB.cpp index 88a751bbe0..482ddd8262 100644 --- a/Kernel/Bus/USB/SysFSUSB.cpp +++ b/Kernel/Bus/USB/SysFSUSB.cpp @@ -57,7 +57,7 @@ KResultOr SysFSUSBDeviceInformation::read_bytes(off_t offset, size_t cou KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function callback) const { 
- ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // Note: if the parent directory is null, it means something bad happened as this should not happen for the USB directory. VERIFY(m_parent_directory); callback({ ".", { fsid, component_index() }, 0 }); @@ -72,7 +72,7 @@ KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function SysFSUSBBusDirectory::lookup(StringView name) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); for (auto& device_node : m_device_nodes) { if (device_node.name() == name) { return device_node; @@ -93,7 +93,7 @@ RefPtr SysFSUSBBusDirectory::device_node_for(USB::Dev void SysFSUSBBusDirectory::plug(USB::Device& new_device) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto device_node = device_node_for(new_device); VERIFY(!device_node); m_device_nodes.append(SysFSUSBDeviceInformation::create(new_device)); @@ -101,7 +101,7 @@ void SysFSUSBBusDirectory::plug(USB::Device& new_device) void SysFSUSBBusDirectory::unplug(USB::Device& deleted_device) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto device_node = device_node_for(deleted_device); VERIFY(device_node); device_node->m_list_node.remove(); diff --git a/Kernel/Bus/VirtIO/VirtIOConsole.cpp b/Kernel/Bus/VirtIO/VirtIOConsole.cpp index 17a1fdfba6..9995d98d39 100644 --- a/Kernel/Bus/VirtIO/VirtIOConsole.cpp +++ b/Kernel/Bus/VirtIO/VirtIOConsole.cpp @@ -64,9 +64,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index) dbgln_if(VIRTIO_DEBUG, "VirtIOConsole: Handle queue update {}", queue_index); if (queue_index == CONTROL_RECEIVEQ) { - ScopedSpinlock ringbuffer_lock(m_control_receive_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_control_receive_buffer->lock()); auto& queue = get_queue(CONTROL_RECEIVEQ); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); size_t used; VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used); @@ -81,9 +81,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index) popped_chain = queue.pop_used_buffer_chain(used); } } else if (queue_index == CONTROL_TRANSMITQ) { - ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_control_transmit_buffer->lock()); auto& queue = get_queue(CONTROL_TRANSMITQ); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); size_t used; VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used); auto number_of_messages = 0; @@ -112,7 +112,7 @@ void VirtIOConsole::setup_multiport() m_control_transmit_buffer = make("VirtIOConsole control transmit queue", CONTROL_BUFFER_SIZE); auto& queue = get_queue(CONTROL_RECEIVEQ); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); VirtIOQueueChain chain(queue); auto offset = 0ul; @@ -184,7 +184,7 @@ void VirtIOConsole::process_control_message(ControlMessage message) } void VirtIOConsole::write_control_message(ControlMessage message) { - ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_control_transmit_buffer->lock()); PhysicalAddress start_of_chunk; size_t length_of_chunk; @@ -197,7 +197,7 @@ void VirtIOConsole::write_control_message(ControlMessage message) } auto& queue = get_queue(CONTROL_TRANSMITQ); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); VirtIOQueueChain chain(queue); bool did_add_buffer = chain.add_buffer_to_chain(start_of_chunk, length_of_chunk, BufferType::DeviceReadable); diff 
--git a/Kernel/Bus/VirtIO/VirtIOConsolePort.cpp b/Kernel/Bus/VirtIO/VirtIOConsolePort.cpp index b263cb9074..d902401a94 100644 --- a/Kernel/Bus/VirtIO/VirtIOConsolePort.cpp +++ b/Kernel/Bus/VirtIO/VirtIOConsolePort.cpp @@ -27,7 +27,7 @@ VirtIOConsolePort::VirtIOConsolePort(unsigned port, VirtIOConsole& console) void VirtIOConsolePort::init_receive_buffer() { auto& queue = m_console.get_queue(m_receive_queue); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); VirtIOQueueChain chain(queue); auto buffer_start = m_receive_buffer->start_of_region(); @@ -42,11 +42,11 @@ void VirtIOConsolePort::handle_queue_update(Badge, u16 queue_inde VERIFY(queue_index == m_transmit_queue || queue_index == m_receive_queue); if (queue_index == m_receive_queue) { auto& queue = m_console.get_queue(m_receive_queue); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); size_t used; VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used); - ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_receive_buffer->lock()); auto used_space = m_receive_buffer->reserve_space(used).value(); auto remaining_space = m_receive_buffer->bytes_till_end(); @@ -65,9 +65,9 @@ void VirtIOConsolePort::handle_queue_update(Badge, u16 queue_inde evaluate_block_conditions(); } else { - ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_transmit_buffer->lock()); auto& queue = m_console.get_queue(m_transmit_queue); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); size_t used; VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used); do { @@ -92,7 +92,7 @@ KResultOr VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern if (!size) return 0; - ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_receive_buffer->lock()); if (!can_read(desc, size)) return EAGAIN; @@ -102,7 +102,7 @@ KResultOr VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern if (m_receive_buffer_exhausted && m_receive_buffer->used_bytes() == 0) { auto& queue = m_console.get_queue(m_receive_queue); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); VirtIOQueueChain new_chain(queue); new_chain.add_buffer_to_chain(m_receive_buffer->start_of_region(), RINGBUFFER_SIZE, BufferType::DeviceWritable); m_console.supply_chain_and_notify(m_receive_queue, new_chain); @@ -122,9 +122,9 @@ KResultOr VirtIOConsolePort::write(FileDescription& desc, u64, const Use if (!size) return 0; - ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock()); + SpinlockLocker ringbuffer_lock(m_transmit_buffer->lock()); auto& queue = m_console.get_queue(m_transmit_queue); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); if (!can_write(desc, size)) return EAGAIN; diff --git a/Kernel/Bus/VirtIO/VirtIOQueue.cpp b/Kernel/Bus/VirtIO/VirtIOQueue.cpp index b931c88781..21a716ecff 100644 --- a/Kernel/Bus/VirtIO/VirtIOQueue.cpp +++ b/Kernel/Bus/VirtIO/VirtIOQueue.cpp @@ -43,13 +43,13 @@ VirtIOQueue::~VirtIOQueue() void VirtIOQueue::enable_interrupts() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_driver->flags = 0; } void VirtIOQueue::disable_interrupts() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_driver->flags = 1; } diff --git a/Kernel/Bus/VirtIO/VirtIORNG.cpp b/Kernel/Bus/VirtIO/VirtIORNG.cpp index 90521f97e9..560ab0a075 100644 --- 
a/Kernel/Bus/VirtIO/VirtIORNG.cpp +++ b/Kernel/Bus/VirtIO/VirtIORNG.cpp @@ -44,7 +44,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index) size_t available_entropy = 0, used; auto& queue = get_queue(REQUESTQ); { - ScopedSpinlock lock(queue.lock()); + SpinlockLocker lock(queue.lock()); auto chain = queue.pop_used_buffer_chain(used); if (chain.is_empty()) return; @@ -64,7 +64,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index) void VirtIORNG::request_entropy_from_host() { auto& queue = get_queue(REQUESTQ); - ScopedSpinlock lock(queue.lock()); + SpinlockLocker lock(queue.lock()); VirtIOQueueChain chain(queue); chain.add_buffer_to_chain(m_entropy_buffer->physical_page(0)->paddr(), PAGE_SIZE, BufferType::DeviceWritable); supply_chain_and_notify(REQUESTQ, chain); diff --git a/Kernel/ConsoleDevice.cpp b/Kernel/ConsoleDevice.cpp index c9d9c08df2..282a1ae010 100644 --- a/Kernel/ConsoleDevice.cpp +++ b/Kernel/ConsoleDevice.cpp @@ -67,7 +67,7 @@ Kernel::KResultOr ConsoleDevice::write(FileDescription&, u64, const Kern void ConsoleDevice::put_char(char ch) { - Kernel::ScopedSpinlock lock(g_console_lock); + Kernel::SpinlockLocker lock(g_console_lock); #ifdef CONSOLE_OUT_TO_BOCHS_DEBUG_PORT IO::out8(IO::BOCHS_DEBUG_PORT, ch); #endif diff --git a/Kernel/CoreDump.cpp b/Kernel/CoreDump.cpp index a8350d3466..3c33ea3ae4 100644 --- a/Kernel/CoreDump.cpp +++ b/Kernel/CoreDump.cpp @@ -321,7 +321,7 @@ ByteBuffer CoreDump::create_notes_segment_data() const KResult CoreDump::write() { - ScopedSpinlock lock(m_process->address_space().get_lock()); + SpinlockLocker lock(m_process->address_space().get_lock()); ProcessPagingScope scope(m_process); ByteBuffer notes_segment = create_notes_segment_data(); diff --git a/Kernel/Devices/AsyncDeviceRequest.cpp b/Kernel/Devices/AsyncDeviceRequest.cpp index 2d8d5f41da..84524be30c 100644 --- a/Kernel/Devices/AsyncDeviceRequest.cpp +++ b/Kernel/Devices/AsyncDeviceRequest.cpp @@ -18,7 +18,7 @@ AsyncDeviceRequest::AsyncDeviceRequest(Device& device) AsyncDeviceRequest::~AsyncDeviceRequest() { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(is_completed_result(m_result)); VERIFY(m_sub_requests_pending.is_empty()); } @@ -63,7 +63,7 @@ auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult auto AsyncDeviceRequest::get_request_result() const -> RequestResult { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return m_result; } @@ -74,7 +74,7 @@ void AsyncDeviceRequest::add_sub_request(NonnullRefPtr sub_r VERIFY(sub_request->m_parent_request == nullptr); sub_request->m_parent_request = this; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(!is_completed_result(m_result)); m_sub_requests_pending.append(sub_request); if (m_result == Started) @@ -85,7 +85,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request) { bool all_completed; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_result == Started); if (m_sub_requests_pending.contains(sub_request)) { @@ -131,7 +131,7 @@ void AsyncDeviceRequest::complete(RequestResult result) VERIFY(result == Success || result == Failure || result == MemoryFault); ScopedCritical critical; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_result == Started); m_result = result; } diff --git a/Kernel/Devices/AsyncDeviceRequest.h b/Kernel/Devices/AsyncDeviceRequest.h index 86ad7318d9..5d7746c6df 100644 --- a/Kernel/Devices/AsyncDeviceRequest.h +++ b/Kernel/Devices/AsyncDeviceRequest.h @@ -61,7 +61,7 @@ public: 
[[nodiscard]] RequestWaitResult wait(Time* = nullptr); - void do_start(ScopedSpinlock>&& requests_lock) + void do_start(SpinlockLocker>&& requests_lock) { if (is_completed_result(m_result)) return; diff --git a/Kernel/Devices/Device.cpp b/Kernel/Devices/Device.cpp index 2689dfeec8..4880f332f3 100644 --- a/Kernel/Devices/Device.cpp +++ b/Kernel/Devices/Device.cpp @@ -62,7 +62,7 @@ String Device::absolute_path(const FileDescription&) const void Device::process_next_queued_request(Badge, const AsyncDeviceRequest& completed_request) { - ScopedSpinlock lock(m_requests_lock); + SpinlockLocker lock(m_requests_lock); VERIFY(!m_requests.is_empty()); VERIFY(m_requests.first().ptr() == &completed_request); m_requests.remove(m_requests.begin()); diff --git a/Kernel/Devices/Device.h b/Kernel/Devices/Device.h index 1c12e03813..95459c8eca 100644 --- a/Kernel/Devices/Device.h +++ b/Kernel/Devices/Device.h @@ -52,7 +52,7 @@ public: NonnullRefPtr make_request(Args&&... args) { auto request = adopt_ref(*new AsyncRequestType(*this, forward(args)...)); - ScopedSpinlock lock(m_requests_lock); + SpinlockLocker lock(m_requests_lock); bool was_empty = m_requests.is_empty(); m_requests.append(request); if (was_empty) diff --git a/Kernel/Devices/HID/I8042Controller.cpp b/Kernel/Devices/HID/I8042Controller.cpp index e9ee98a928..3f11b70560 100644 --- a/Kernel/Devices/HID/I8042Controller.cpp +++ b/Kernel/Devices/HID/I8042Controller.cpp @@ -35,7 +35,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices() { u8 configuration; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // Disable devices do_wait_then_write(I8042_STATUS, 0xad); do_wait_then_write(I8042_STATUS, 0xa7); // ignored if it doesn't exist @@ -103,7 +103,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices() m_first_port_available = false; configuration &= ~1; configuration |= 1 << 4; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_wait_then_write(I8042_STATUS, 0x60); do_wait_then_write(I8042_BUFFER, configuration); } @@ -116,7 +116,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices() dbgln("I8042: Mouse device failed to initialize, disable"); m_second_port_available = false; configuration |= 1 << 5; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_wait_then_write(I8042_STATUS, 0x60); do_wait_then_write(I8042_BUFFER, configuration); } diff --git a/Kernel/Devices/HID/I8042Controller.h b/Kernel/Devices/HID/I8042Controller.h index e4983278cc..ab31ff55fa 100644 --- a/Kernel/Devices/HID/I8042Controller.h +++ b/Kernel/Devices/HID/I8042Controller.h @@ -53,36 +53,36 @@ public: bool reset_device(HIDDevice::Type device) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_reset_device(device); } u8 send_command(HIDDevice::Type device, u8 command) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_send_command(device, command); } u8 send_command(HIDDevice::Type device, u8 command, u8 data) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_send_command(device, command, data); } u8 read_from_device(HIDDevice::Type device) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_read_from_device(device); } void wait_then_write(u8 port, u8 data) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_wait_then_write(port, data); } u8 wait_then_read(u8 port) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_wait_then_read(port); } diff --git a/Kernel/Devices/HID/KeyboardDevice.cpp 
b/Kernel/Devices/HID/KeyboardDevice.cpp index ad81323c0e..85c5611d18 100644 --- a/Kernel/Devices/HID/KeyboardDevice.cpp +++ b/Kernel/Devices/HID/KeyboardDevice.cpp @@ -251,7 +251,7 @@ void KeyboardDevice::key_state_changed(u8 scan_code, bool pressed) HIDManagement::the().m_client->on_key_pressed(event); { - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); m_queue.enqueue(event); } @@ -281,7 +281,7 @@ bool KeyboardDevice::can_read(const FileDescription&, size_t) const KResultOr KeyboardDevice::read(FileDescription&, u64, UserOrKernelBuffer& buffer, size_t size) { size_t nread = 0; - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); while (nread < size) { if (m_queue.is_empty()) break; diff --git a/Kernel/Devices/HID/MouseDevice.cpp b/Kernel/Devices/HID/MouseDevice.cpp index 1554335ba6..80c4a1ef73 100644 --- a/Kernel/Devices/HID/MouseDevice.cpp +++ b/Kernel/Devices/HID/MouseDevice.cpp @@ -20,7 +20,7 @@ MouseDevice::~MouseDevice() bool MouseDevice::can_read(const FileDescription&, size_t) const { - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); return !m_queue.is_empty(); } @@ -29,7 +29,7 @@ KResultOr MouseDevice::read(FileDescription&, u64, UserOrKernelBuffer& b VERIFY(size > 0); size_t nread = 0; size_t remaining_space_in_buffer = static_cast(size) - nread; - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); while (!m_queue.is_empty() && remaining_space_in_buffer) { auto packet = m_queue.dequeue(); lock.unlock(); diff --git a/Kernel/Devices/HID/PS2MouseDevice.cpp b/Kernel/Devices/HID/PS2MouseDevice.cpp index c6453f41c3..15dec13ae1 100644 --- a/Kernel/Devices/HID/PS2MouseDevice.cpp +++ b/Kernel/Devices/HID/PS2MouseDevice.cpp @@ -60,7 +60,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte) m_entropy_source.add_random_event(m_data.dword); { - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); m_queue.enqueue(parse_data_packet(m_data)); } evaluate_block_conditions(); diff --git a/Kernel/Devices/HID/VMWareMouseDevice.cpp b/Kernel/Devices/HID/VMWareMouseDevice.cpp index a2dab03e04..46ca4b3984 100644 --- a/Kernel/Devices/HID/VMWareMouseDevice.cpp +++ b/Kernel/Devices/HID/VMWareMouseDevice.cpp @@ -36,7 +36,7 @@ void VMWareMouseDevice::irq_handle_byte_read(u8) if (mouse_packet.has_value()) { m_entropy_source.add_random_event(mouse_packet.value()); { - ScopedSpinlock lock(m_queue_lock); + SpinlockLocker lock(m_queue_lock); m_queue.enqueue(mouse_packet.value()); } evaluate_block_conditions(); diff --git a/Kernel/Devices/KCOVDevice.cpp b/Kernel/Devices/KCOVDevice.cpp index ec4a54d758..4bc5161c45 100644 --- a/Kernel/Devices/KCOVDevice.cpp +++ b/Kernel/Devices/KCOVDevice.cpp @@ -84,7 +84,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace a return ENXIO; // This proc hasn't opened the kcov dev yet auto kcov_instance = maybe_kcov_instance.value(); - ScopedSpinlock lock(kcov_instance->lock); + SpinlockLocker lock(kcov_instance->lock); switch (request) { case KCOV_SETBUFSIZE: { if (kcov_instance->state >= KCOVInstance::TRACING) { diff --git a/Kernel/Devices/SerialDevice.cpp b/Kernel/Devices/SerialDevice.cpp index 1384b9f44b..5b6b2dac3d 100644 --- a/Kernel/Devices/SerialDevice.cpp +++ b/Kernel/Devices/SerialDevice.cpp @@ -59,7 +59,7 @@ KResultOr SerialDevice::read(FileDescription&, u64, UserOrKernelBuffer& if (!size) return 0; - ScopedSpinlock lock(m_serial_lock); + SpinlockLocker lock(m_serial_lock); if (!(get_line_status() & DataReady)) return 0; @@ -80,7 +80,7 @@ 
KResultOr SerialDevice::write(FileDescription& description, u64, const U if (!size) return 0; - ScopedSpinlock lock(m_serial_lock); + SpinlockLocker lock(m_serial_lock); if (!can_write(description, size)) return EAGAIN; diff --git a/Kernel/FileSystem/File.h b/Kernel/FileSystem/File.h index 3fc4d419f7..cfc9bd3134 100644 --- a/Kernel/FileSystem/File.h +++ b/Kernel/FileSystem/File.h @@ -34,7 +34,7 @@ public: void unblock() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_unblock([&](auto& b, void* data, bool&) { VERIFY(b.blocker_type() == Thread::Blocker::Type::File); auto& blocker = static_cast(b); diff --git a/Kernel/FileSystem/Plan9FileSystem.cpp b/Kernel/FileSystem/Plan9FileSystem.cpp index 2bff5b8f05..b1dfa52efe 100644 --- a/Kernel/FileSystem/Plan9FileSystem.cpp +++ b/Kernel/FileSystem/Plan9FileSystem.cpp @@ -412,7 +412,7 @@ Plan9FS::ReceiveCompletion::~ReceiveCompletion() bool Plan9FS::Blocker::unblock(u16 tag) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; m_did_unblock = true; @@ -428,7 +428,7 @@ bool Plan9FS::Blocker::unblock(u16 tag) void Plan9FS::Blocker::not_blocking(bool) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return; } @@ -438,7 +438,7 @@ void Plan9FS::Blocker::not_blocking(bool) bool Plan9FS::Blocker::is_completed() const { - ScopedSpinlock lock(m_completion->lock); + SpinlockLocker lock(m_completion->lock); return m_completion->completed; } @@ -470,7 +470,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_all() void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker) { if (m_fs.is_complete(*blocker.completion())) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); blocker.unblock(blocker.completion()->tag); } } @@ -576,7 +576,7 @@ KResult Plan9FS::read_and_dispatch_one_message() auto optional_completion = m_completions.get(header.tag); if (optional_completion.has_value()) { auto completion = optional_completion.value(); - ScopedSpinlock lock(completion->lock); + SpinlockLocker lock(completion->lock); completion->result = KSuccess; completion->message = adopt_own_if_nonnull(new (nothrow) Message { buffer.release_nonnull() }); completion->completed = true; @@ -666,7 +666,7 @@ void Plan9FS::thread_main() void Plan9FS::ensure_thread() { - ScopedSpinlock lock(m_thread_lock); + SpinlockLocker lock(m_thread_lock); if (!m_thread_running.exchange(true, AK::MemoryOrder::memory_order_acq_rel)) { Process::create_kernel_process(m_thread, "Plan9FS", [&]() { thread_main(); diff --git a/Kernel/FileSystem/SysFSComponent.cpp b/Kernel/FileSystem/SysFSComponent.cpp index f773791c37..7878022111 100644 --- a/Kernel/FileSystem/SysFSComponent.cpp +++ b/Kernel/FileSystem/SysFSComponent.cpp @@ -14,7 +14,7 @@ static InodeIndex s_next_inode_index { 0 }; static size_t allocate_inode_index() { - ScopedSpinlock lock(s_index_lock); + SpinlockLocker lock(s_index_lock); s_next_inode_index = s_next_inode_index.value() + 1; VERIFY(s_next_inode_index > 0); return s_next_inode_index.value(); diff --git a/Kernel/Forward.h b/Kernel/Forward.h index e2a927bfa3..ca14baeae5 100644 --- a/Kernel/Forward.h +++ b/Kernel/Forward.h @@ -86,7 +86,7 @@ class VirtualRangeAllocator; template class Spinlock; template -class ScopedSpinlock; +class SpinlockLocker; template class KResultOr; diff --git a/Kernel/FutexQueue.cpp b/Kernel/FutexQueue.cpp index e33c83b0eb..fe7368e5d8 100644 --- a/Kernel/FutexQueue.cpp +++ b/Kernel/FutexQueue.cpp @@ -39,7 +39,7 @@ bool 
FutexQueue::should_add_blocker(Thread::Blocker& b, void* data) u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target) { is_empty_target = false; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count); @@ -75,7 +75,7 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function& ge lock.unlock(); did_requeue = blockers_to_requeue.size(); - ScopedSpinlock target_lock(target_futex_queue->m_lock); + SpinlockLocker target_lock(target_futex_queue->m_lock); // Now that we have the lock of the target, append the blockers // and notify them that they completed the move for (auto& info : blockers_to_requeue) { @@ -100,7 +100,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional& bitset, bool& is_emp is_empty = false; return 0; // should we assert instead? } - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count); u32 did_wake = 0; do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) { @@ -123,7 +123,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional& bitset, bool& is_emp u32 FutexQueue::wake_all(bool& is_empty) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this); u32 did_wake = 0; do_unblock([&](Thread::Blocker& b, void* data, bool&) { @@ -148,7 +148,7 @@ bool FutexQueue::is_empty_and_no_imminent_waits_locked() bool FutexQueue::queue_imminent_wait() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_was_removed) return false; m_imminent_waits++; @@ -157,7 +157,7 @@ bool FutexQueue::queue_imminent_wait() bool FutexQueue::try_remove() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_was_removed) return false; if (!is_empty_and_no_imminent_waits_locked()) @@ -168,7 +168,7 @@ bool FutexQueue::try_remove() void FutexQueue::did_remove() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_was_removed); VERIFY(is_empty_and_no_imminent_waits_locked()); } diff --git a/Kernel/FutexQueue.h b/Kernel/FutexQueue.h index 11769eec30..57f134ab10 100644 --- a/Kernel/FutexQueue.h +++ b/Kernel/FutexQueue.h @@ -37,7 +37,7 @@ public: bool is_empty_and_no_imminent_waits() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return is_empty_and_no_imminent_waits_locked(); } bool is_empty_and_no_imminent_waits_locked(); diff --git a/Kernel/GlobalProcessExposed.cpp b/Kernel/GlobalProcessExposed.cpp index e3dc1b1d8d..a6c3323afb 100644 --- a/Kernel/GlobalProcessExposed.cpp +++ b/Kernel/GlobalProcessExposed.cpp @@ -474,7 +474,7 @@ private: process_object.add("kernel", process.is_kernel_process()); auto thread_array = process_object.add_array("threads"); process.for_each_thread([&](const Thread& thread) { - ScopedSpinlock locker(thread.get_lock()); + SpinlockLocker locker(thread.get_lock()); auto thread_object = thread_array.add_object(); #if LOCK_DEBUG thread_object.add("lock_count", thread.lock_count()); @@ -500,7 +500,7 @@ private: }); }; - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); { { auto array = json.add_array("processes"); diff --git a/Kernel/Graphics/Bochs/GraphicsAdapter.cpp b/Kernel/Graphics/Bochs/GraphicsAdapter.cpp index 7d3e9364d3..8876967cba 100644 --- a/Kernel/Graphics/Bochs/GraphicsAdapter.cpp +++ 
b/Kernel/Graphics/Bochs/GraphicsAdapter.cpp @@ -208,7 +208,7 @@ bool BochsGraphicsAdapter::set_y_offset(size_t output_port_index, size_t y_offse void BochsGraphicsAdapter::enable_consoles() { - ScopedSpinlock lock(m_console_mode_switch_lock); + SpinlockLocker lock(m_console_mode_switch_lock); VERIFY(m_framebuffer_console); m_console_enabled = true; m_registers->bochs_regs.y_offset = 0; @@ -218,7 +218,7 @@ void BochsGraphicsAdapter::enable_consoles() } void BochsGraphicsAdapter::disable_consoles() { - ScopedSpinlock lock(m_console_mode_switch_lock); + SpinlockLocker lock(m_console_mode_switch_lock); VERIFY(m_framebuffer_console); VERIFY(m_framebuffer_device); m_console_enabled = false; diff --git a/Kernel/Graphics/Console/GenericFramebufferConsole.cpp b/Kernel/Graphics/Console/GenericFramebufferConsole.cpp index 85c438bcc8..f8d7b01a46 100644 --- a/Kernel/Graphics/Console/GenericFramebufferConsole.cpp +++ b/Kernel/Graphics/Console/GenericFramebufferConsole.cpp @@ -224,7 +224,7 @@ void GenericFramebufferConsole::show_cursor() void GenericFramebufferConsole::clear(size_t x, size_t y, size_t length) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (x == 0 && length == max_column()) { // if we need to clear the entire row, just clean it with quick memset :) auto* offset_in_framebuffer = (u32*)&framebuffer_data()[x * sizeof(u32) * 8 + y * 8 * sizeof(u32) * width()]; @@ -264,19 +264,19 @@ void GenericFramebufferConsole::clear_glyph(size_t x, size_t y) void GenericFramebufferConsole::enable() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); memset(framebuffer_data(), 0, height() * width() * sizeof(u32)); m_enabled.store(true); } void GenericFramebufferConsole::disable() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_enabled.store(false); } void GenericFramebufferConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!m_enabled.load()) return; diff --git a/Kernel/Graphics/Console/TextModeConsole.cpp b/Kernel/Graphics/Console/TextModeConsole.cpp index 404f994ec7..9df455e4bd 100644 --- a/Kernel/Graphics/Console/TextModeConsole.cpp +++ b/Kernel/Graphics/Console/TextModeConsole.cpp @@ -87,8 +87,8 @@ enum VGAColor : u8 { void TextModeConsole::set_cursor(size_t x, size_t y) { - ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock()); - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock()); + SpinlockLocker lock(m_vga_lock); m_cursor_x = x; m_cursor_y = y; u16 value = m_current_vga_start_address + (y * width() + x); @@ -99,22 +99,22 @@ void TextModeConsole::set_cursor(size_t x, size_t y) } void TextModeConsole::hide_cursor() { - ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock()); - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock()); + SpinlockLocker lock(m_vga_lock); IO::out8(0x3D4, 0xA); IO::out8(0x3D5, 0x20); } void TextModeConsole::show_cursor() { - ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock()); - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock()); + SpinlockLocker lock(m_vga_lock); IO::out8(0x3D4, 0xA); IO::out8(0x3D5, 0x20); } void TextModeConsole::clear(size_t x, size_t y, size_t length) { - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker lock(m_vga_lock); auto* buf = (u16*)(m_current_vga_window + (x * 2) + (y * width() * 2)); for (size_t 
index = 0; index < length; index++) { buf[index] = 0x0720; @@ -127,12 +127,12 @@ void TextModeConsole::write(size_t x, size_t y, char ch, bool critical) void TextModeConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical) { - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker lock(m_vga_lock); // If we are in critical printing mode, we need to handle new lines here // because there's no other responsible object to do that in the print call path if (critical && (ch == '\r' || ch == '\n')) { // Disable hardware VGA cursor - ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock()); + SpinlockLocker main_lock(GraphicsManagement::the().main_vga_lock()); IO::out8(0x3D4, 0xA); IO::out8(0x3D5, 0x20); @@ -162,7 +162,7 @@ void TextModeConsole::clear_vga_row(u16 row) void TextModeConsole::set_vga_start_row(u16 row) { - ScopedSpinlock lock(m_vga_lock); + SpinlockLocker lock(m_vga_lock); m_vga_start_row = row; m_current_vga_start_address = row * width(); m_current_vga_window = m_current_vga_window + row * width() * bytes_per_base_glyph(); diff --git a/Kernel/Graphics/FramebufferDevice.cpp b/Kernel/Graphics/FramebufferDevice.cpp index 89964ba6ad..99f5745af6 100644 --- a/Kernel/Graphics/FramebufferDevice.cpp +++ b/Kernel/Graphics/FramebufferDevice.cpp @@ -27,7 +27,7 @@ NonnullRefPtr FramebufferDevice::create(const GraphicsDevice& KResultOr FramebufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared) { - ScopedSpinlock lock(m_activation_lock); + SpinlockLocker lock(m_activation_lock); REQUIRE_PROMISE(video); if (!shared) return ENODEV; @@ -80,7 +80,7 @@ KResultOr FramebufferDevice::mmap(Process& process, FileDescrip void FramebufferDevice::deactivate_writes() { - ScopedSpinlock lock(m_activation_lock); + SpinlockLocker lock(m_activation_lock); if (!m_userspace_framebuffer_region) return; memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_size_in_bytes())); @@ -91,7 +91,7 @@ void FramebufferDevice::deactivate_writes() } void FramebufferDevice::activate_writes() { - ScopedSpinlock lock(m_activation_lock); + SpinlockLocker lock(m_activation_lock); if (!m_userspace_framebuffer_region || !m_real_framebuffer_vmobject) return; // restore the image we had in the void area diff --git a/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp b/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp index 81c1f0c8ab..51976a05b5 100644 --- a/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp +++ b/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp @@ -192,7 +192,7 @@ IntelNativeGraphicsAdapter::IntelNativeGraphicsAdapter(PCI::Address address) m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::ReadWrite); PCI::enable_bus_mastering(address); { - ScopedSpinlock control_lock(m_control_lock); + SpinlockLocker control_lock(m_control_lock); set_gmbus_default_rate(); set_gmbus_pin_pair(GMBusPinPair::DedicatedAnalog); } @@ -277,7 +277,7 @@ void IntelNativeGraphicsAdapter::write_to_register(IntelGraphics::RegisterIndex { VERIFY(m_control_lock.is_locked()); VERIFY(m_registers_region); - ScopedSpinlock lock(m_registers_lock); + SpinlockLocker lock(m_registers_lock); dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Write to {} value of {:x}", pci_address(), convert_register_index_to_string(index), value); auto* reg = (volatile 
u32*)m_registers_region->vaddr().offset(index).as_ptr(); *reg = value; @@ -286,7 +286,7 @@ u32 IntelNativeGraphicsAdapter::read_from_register(IntelGraphics::RegisterIndex { VERIFY(m_control_lock.is_locked()); VERIFY(m_registers_region); - ScopedSpinlock lock(m_registers_lock); + SpinlockLocker lock(m_registers_lock); auto* reg = (volatile u32*)m_registers_region->vaddr().offset(index).as_ptr(); u32 value = *reg; dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Read from {} value of {:x}", pci_address(), convert_register_index_to_string(index), value); @@ -373,7 +373,7 @@ void IntelNativeGraphicsAdapter::gmbus_read(unsigned address, u8* buf, size_t le void IntelNativeGraphicsAdapter::gmbus_read_edid() { - ScopedSpinlock control_lock(m_control_lock); + SpinlockLocker control_lock(m_control_lock); gmbus_write(DDC2_I2C_ADDRESS, 0); gmbus_read(DDC2_I2C_ADDRESS, (u8*)&m_crt_edid, sizeof(Graphics::VideoInfoBlock)); } @@ -409,8 +409,8 @@ void IntelNativeGraphicsAdapter::enable_output(PhysicalAddress fb_address, size_ bool IntelNativeGraphicsAdapter::set_crt_resolution(size_t width, size_t height) { - ScopedSpinlock control_lock(m_control_lock); - ScopedSpinlock modeset_lock(m_modeset_lock); + SpinlockLocker control_lock(m_control_lock); + SpinlockLocker modeset_lock(m_modeset_lock); if (!is_resolution_valid(width, height)) { return false; } diff --git a/Kernel/Graphics/VirtIOGPU/GPU.cpp b/Kernel/Graphics/VirtIOGPU/GPU.cpp index b2f0aab913..4cce1eda07 100644 --- a/Kernel/Graphics/VirtIOGPU/GPU.cpp +++ b/Kernel/Graphics/VirtIOGPU/GPU.cpp @@ -81,7 +81,7 @@ void GPU::handle_queue_update(u16 queue_index) VERIFY(queue_index == CONTROLQ); auto& queue = get_queue(CONTROLQ); - ScopedSpinlock queue_lock(queue.lock()); + SpinlockLocker queue_lock(queue.lock()); queue.discard_used_buffers(); m_outstanding_request.wake_all(); } @@ -242,7 +242,7 @@ void GPU::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t re VERIFY(m_outstanding_request.is_empty()); auto& queue = get_queue(CONTROLQ); { - ScopedSpinlock lock(queue.lock()); + SpinlockLocker lock(queue.lock()); VirtIOQueueChain chain { queue }; chain.add_buffer_to_chain(buffer_start, request_size, BufferType::DeviceReadable); chain.add_buffer_to_chain(buffer_start.offset(request_size), response_size, BufferType::DeviceWritable); diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp index 6ee471dc42..500efef438 100644 --- a/Kernel/Heap/kmalloc.cpp +++ b/Kernel/Heap/kmalloc.cpp @@ -136,7 +136,7 @@ struct KmallocGlobalHeap { // onto the region. Unless we already used the backup // memory, in which case we want to use the region as the // new backup. 
- ScopedSpinlock lock(s_lock); + SpinlockLocker lock(s_lock); if (!m_global_heap.m_backup_memory) { if constexpr (KMALLOC_DEBUG) { dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size()); @@ -235,7 +235,7 @@ void* kmalloc_eternal(size_t size) size = round_up_to_power_of_two(size, sizeof(void*)); - ScopedSpinlock lock(s_lock); + SpinlockLocker lock(s_lock); void* ptr = s_next_eternal_ptr; s_next_eternal_ptr += size; VERIFY(s_next_eternal_ptr < s_end_of_eternal_range); @@ -246,7 +246,7 @@ void* kmalloc_eternal(size_t size) void* kmalloc(size_t size) { kmalloc_verify_nospinlock_held(); - ScopedSpinlock lock(s_lock); + SpinlockLocker lock(s_lock); ++g_kmalloc_call_count; if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) { @@ -277,7 +277,7 @@ void kfree(void* ptr) return; kmalloc_verify_nospinlock_held(); - ScopedSpinlock lock(s_lock); + SpinlockLocker lock(s_lock); ++g_kfree_call_count; ++g_nested_kfree_calls; @@ -375,7 +375,7 @@ void operator delete[](void* ptr, size_t size) noexcept void get_kmalloc_stats(kmalloc_stats& stats) { - ScopedSpinlock lock(s_lock); + SpinlockLocker lock(s_lock); stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes(); stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes(); stats.bytes_eternal = g_kmalloc_bytes_eternal; diff --git a/Kernel/Locking/Mutex.cpp b/Kernel/Locking/Mutex.cpp index 98918be46d..b7271fbf07 100644 --- a/Kernel/Locking/Mutex.cpp +++ b/Kernel/Locking/Mutex.cpp @@ -21,7 +21,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location) VERIFY(mode != Mode::Unlocked); auto current_thread = Thread::current(); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); bool did_block = false; Mode current_mode = m_mode; switch (current_mode) { @@ -145,7 +145,7 @@ void Mutex::unlock() // and also from within critical sections! VERIFY(!Processor::current().in_irq()); auto current_thread = Thread::current(); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); Mode current_mode = m_mode; if constexpr (LOCK_TRACE_DEBUG) { if (current_mode == Mode::Shared) @@ -196,7 +196,7 @@ void Mutex::unlock() } } -void Mutex::block(Thread& current_thread, Mode mode, ScopedSpinlock>& lock, u32 requested_locks) +void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker>& lock, u32 requested_locks) { auto& blocked_thread_list = thread_list_for_mode(mode); VERIFY(!blocked_thread_list.contains(current_thread)); @@ -255,7 +255,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode // and also from within critical sections! 
VERIFY(!Processor::current().in_irq()); auto current_thread = Thread::current(); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto current_mode = m_mode; switch (current_mode) { case Mode::Exclusive: { @@ -319,7 +319,7 @@ void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] LockLocatio VERIFY(!Processor::current().in_irq()); auto current_thread = Thread::current(); bool did_block = false; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); switch (mode) { case Mode::Exclusive: { auto previous_mode = m_mode; diff --git a/Kernel/Locking/Mutex.h b/Kernel/Locking/Mutex.h index 582fd428f2..9eb9c6a68b 100644 --- a/Kernel/Locking/Mutex.h +++ b/Kernel/Locking/Mutex.h @@ -39,12 +39,12 @@ public: [[nodiscard]] Mode force_unlock_if_locked(u32&); [[nodiscard]] bool is_locked() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return m_mode != Mode::Unlocked; } [[nodiscard]] bool own_lock() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_mode == Mode::Exclusive) return m_holder == Thread::current(); if (m_mode == Mode::Shared) @@ -77,7 +77,7 @@ private: return mode == Mode::Exclusive ? m_blocked_threads_list_exclusive : m_blocked_threads_list_shared; } - void block(Thread&, Mode, ScopedSpinlock>&, u32); + void block(Thread&, Mode, SpinlockLocker>&, u32); void unblock_waiters(Mode); const char* m_name { nullptr }; diff --git a/Kernel/Locking/Spinlock.h b/Kernel/Locking/Spinlock.h index b9e266e2ab..f2020ef3a7 100644 --- a/Kernel/Locking/Spinlock.h +++ b/Kernel/Locking/Spinlock.h @@ -116,15 +116,14 @@ private: }; template -class [[nodiscard]] ScopedSpinlock { - - AK_MAKE_NONCOPYABLE(ScopedSpinlock); +class [[nodiscard]] SpinlockLocker { + AK_MAKE_NONCOPYABLE(SpinlockLocker); public: - ScopedSpinlock() = delete; - ScopedSpinlock& operator=(ScopedSpinlock&&) = delete; + SpinlockLocker() = delete; + SpinlockLocker& operator=(SpinlockLocker&&) = delete; - ScopedSpinlock(LockType& lock) + SpinlockLocker(LockType& lock) : m_lock(&lock) { VERIFY(m_lock); @@ -132,7 +131,7 @@ public: m_have_lock = true; } - ScopedSpinlock(ScopedSpinlock&& from) + SpinlockLocker(SpinlockLocker&& from) : m_lock(from.m_lock) , m_prev_flags(from.m_prev_flags) , m_have_lock(from.m_have_lock) @@ -142,7 +141,7 @@ public: from.m_have_lock = false; } - ~ScopedSpinlock() + ~SpinlockLocker() { if (m_lock && m_have_lock) { m_lock->unlock(m_prev_flags); diff --git a/Kernel/Locking/SpinlockProtected.h b/Kernel/Locking/SpinlockProtected.h index 9c9bc6d869..39a7b134b2 100644 --- a/Kernel/Locking/SpinlockProtected.h +++ b/Kernel/Locking/SpinlockProtected.h @@ -39,7 +39,7 @@ private: private: U& m_value; - ScopedSpinlock m_locker; + SpinlockLocker m_locker; }; auto lock_const() const { return Locked(m_value, m_spinlock); } diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp index 83489e6903..dabf79d346 100644 --- a/Kernel/Memory/AddressSpace.cpp +++ b/Kernel/Memory/AddressSpace.cpp @@ -223,7 +223,7 @@ void AddressSpace::deallocate_region(Region& region) NonnullOwnPtr AddressSpace::take_region(Region& region) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_region_lookup_cache.region.unsafe_ptr() == ®ion) m_region_lookup_cache.region = nullptr; @@ -235,7 +235,7 @@ NonnullOwnPtr AddressSpace::take_region(Region& region) Region* AddressSpace::find_region_from_range(VirtualRange const& range) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_region_lookup_cache.range.has_value() && 
m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region) return m_region_lookup_cache.region.unsafe_ptr(); @@ -253,7 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range) Region* AddressSpace::find_region_containing(VirtualRange const& range) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto candidate = m_regions.find_largest_not_above(range.base().get()); if (!candidate) return nullptr; @@ -265,7 +265,7 @@ Vector AddressSpace::find_regions_intersecting(VirtualRange const& rang Vector regions = {}; size_t total_size_collected = 0; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto found_region = m_regions.find_largest_not_above(range.base().get()); if (!found_region) @@ -286,7 +286,7 @@ Vector AddressSpace::find_regions_intersecting(VirtualRange const& rang Region* AddressSpace::add_region(NonnullOwnPtr region) { auto* ptr = region.ptr(); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); auto success = m_regions.try_insert(region->vaddr().get(), move(region)); return success ? ptr : nullptr; } @@ -324,7 +324,7 @@ void AddressSpace::dump_regions() dbgln("BEGIN{} END{} SIZE{} ACCESS NAME", addr_padding, addr_padding, addr_padding); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); for (auto& sorted_region : m_regions) { auto& region = *sorted_region; @@ -342,13 +342,13 @@ void AddressSpace::dump_regions() void AddressSpace::remove_all_regions(Badge) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_regions.clear(); } size_t AddressSpace::amount_dirty_private() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject. // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping. // That's probably a situation that needs to be looked at in general. @@ -362,7 +362,7 @@ size_t AddressSpace::amount_dirty_private() const size_t AddressSpace::amount_clean_inode() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); HashTable vmobjects; for (auto& region : m_regions) { if (region->vmobject().is_inode()) @@ -376,7 +376,7 @@ size_t AddressSpace::amount_clean_inode() const size_t AddressSpace::amount_virtual() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); size_t amount = 0; for (auto& region : m_regions) { amount += region->size(); @@ -386,7 +386,7 @@ size_t AddressSpace::amount_virtual() const size_t AddressSpace::amount_resident() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // FIXME: This will double count if multiple regions use the same physical page. size_t amount = 0; for (auto& region : m_regions) { @@ -397,7 +397,7 @@ size_t AddressSpace::amount_resident() const size_t AddressSpace::amount_shared() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // FIXME: This will double count if multiple regions use the same physical page. // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts, // and each PhysicalPage is only reffed by its VMObject. 
This needs to be refactored @@ -411,7 +411,7 @@ size_t AddressSpace::amount_shared() const size_t AddressSpace::amount_purgeable_volatile() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); size_t amount = 0; for (auto& region : m_regions) { if (!region->vmobject().is_anonymous()) @@ -425,7 +425,7 @@ size_t AddressSpace::amount_purgeable_volatile() const size_t AddressSpace::amount_purgeable_nonvolatile() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); size_t amount = 0; for (auto& region : m_regions) { if (!region->vmobject().is_anonymous()) diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp index fb57dc5fcb..a882671f6e 100644 --- a/Kernel/Memory/AnonymousVMObject.cpp +++ b/Kernel/Memory/AnonymousVMObject.cpp @@ -16,7 +16,7 @@ namespace Kernel::Memory { KResultOr> AnonymousVMObject::try_clone() { // We need to acquire our lock so we copy a sane state - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (is_purgeable() && is_volatile()) { // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile @@ -178,7 +178,7 @@ AnonymousVMObject::~AnonymousVMObject() size_t AnonymousVMObject::purge() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!is_purgeable() || !is_volatile()) return 0; @@ -206,7 +206,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged) { VERIFY(is_purgeable()); - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); was_purged = m_was_purged; if (m_volatile == is_volatile) @@ -306,7 +306,7 @@ size_t AnonymousVMObject::cow_pages() const PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr) { VERIFY_INTERRUPTS_DISABLED(); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (is_volatile()) { // A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash. 
@@ -379,13 +379,13 @@ AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages() NonnullRefPtr AnonymousVMObject::SharedCommittedCowPages::take_one() { - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); return m_committed_pages.take_one(); } void AnonymousVMObject::SharedCommittedCowPages::uncommit_one() { - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); m_committed_pages.uncommit_one(); } diff --git a/Kernel/Memory/InodeVMObject.cpp b/Kernel/Memory/InodeVMObject.cpp index 6d1a4025e1..0435fc9522 100644 --- a/Kernel/Memory/InodeVMObject.cpp +++ b/Kernel/Memory/InodeVMObject.cpp @@ -52,7 +52,7 @@ size_t InodeVMObject::amount_dirty() const int InodeVMObject::release_all_clean_pages() { - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); int count = 0; for (size_t i = 0; i < page_count(); ++i) { diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp index 17c208f0fa..469ab4d01f 100644 --- a/Kernel/Memory/MemoryManager.cpp +++ b/Kernel/Memory/MemoryManager.cpp @@ -63,7 +63,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager() { s_the = this; - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); parse_memory_map(); write_cr3(kernel_page_directory().cr3()); protect_kernel_image(); @@ -88,7 +88,7 @@ UNMAP_AFTER_INIT MemoryManager::~MemoryManager() UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image() { - ScopedSpinlock page_lock(kernel_page_directory().get_lock()); + SpinlockLocker page_lock(kernel_page_directory().get_lock()); // Disable writing to the kernel text and rodata segments. for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) { auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i)); @@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image() UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory() { - ScopedSpinlock page_lock(kernel_page_directory().get_lock()); - ScopedSpinlock mm_lock(s_mm_lock); + SpinlockLocker page_lock(kernel_page_directory().get_lock()); + SpinlockLocker mm_lock(s_mm_lock); // Disable writing to the .ro_after_init section for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) { auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i)); @@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory() void MemoryManager::unmap_text_after_init() { - ScopedSpinlock page_lock(kernel_page_directory().get_lock()); - ScopedSpinlock mm_lock(s_mm_lock); + SpinlockLocker page_lock(kernel_page_directory().get_lock()); + SpinlockLocker mm_lock(s_mm_lock); auto start = page_round_down((FlatPtr)&start_of_unmap_after_init); auto end = page_round_up((FlatPtr)&end_of_unmap_after_init); @@ -135,8 +135,8 @@ void MemoryManager::unmap_text_after_init() void MemoryManager::unmap_ksyms_after_init() { - ScopedSpinlock mm_lock(s_mm_lock); - ScopedSpinlock page_lock(kernel_page_directory().get_lock()); + SpinlockLocker mm_lock(s_mm_lock); + SpinlockLocker page_lock(kernel_page_directory().get_lock()); auto start = page_round_down((FlatPtr)start_of_kernel_ksyms); auto end = page_round_up((FlatPtr)end_of_kernel_ksyms); @@ -413,7 +413,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages() // try to map the entire region into kernel space so we always have it // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array // mapped yet so we can't create them - ScopedSpinlock 
lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array auto page_tables_base = m_physical_pages_region->lower(); @@ -612,7 +612,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu) Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); for (auto& region : MM.m_kernel_regions) { if (region.contains(vaddr)) return ®ion; @@ -628,7 +628,7 @@ Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr) { - ScopedSpinlock lock(space.get_lock()); + SpinlockLocker lock(space.get_lock()); return find_user_region_from_vaddr_no_lock(space, vaddr); } @@ -636,7 +636,7 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register { // We take the space lock once here and then use the no_lock variants // to avoid excessive spinlock recursion in this extemely common path. - ScopedSpinlock lock(space.get_lock()); + SpinlockLocker lock(space.get_lock()); auto unlock_and_handle_crash = [&lock, ®s](const char* description, int signal) { lock.unlock(); @@ -702,7 +702,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault) OwnPtr MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) { VERIFY(!(size % PAGE_SIZE)); - ScopedSpinlock lock(kernel_page_directory().get_lock()); + SpinlockLocker lock(kernel_page_directory().get_lock()); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); if (!range.has_value()) return {}; @@ -721,7 +721,7 @@ OwnPtr MemoryManager::allocate_kernel_region(size_t size, StringView nam auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy); if (maybe_vm_object.is_error()) return {}; - ScopedSpinlock lock(kernel_page_directory().get_lock()); + SpinlockLocker lock(kernel_page_directory().get_lock()); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); if (!range.has_value()) return {}; @@ -734,7 +734,7 @@ OwnPtr MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size if (maybe_vm_object.is_error()) return {}; VERIFY(!(size % PAGE_SIZE)); - ScopedSpinlock lock(kernel_page_directory().get_lock()); + SpinlockLocker lock(kernel_page_directory().get_lock()); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); if (!range.has_value()) return {}; @@ -755,7 +755,7 @@ OwnPtr MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange OwnPtr MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) { VERIFY(!(size % PAGE_SIZE)); - ScopedSpinlock lock(kernel_page_directory().get_lock()); + SpinlockLocker lock(kernel_page_directory().get_lock()); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); if (!range.has_value()) return {}; @@ -765,7 +765,7 @@ OwnPtr MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo Optional MemoryManager::commit_user_physical_pages(size_t page_count) { VERIFY(page_count > 0); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); if (m_system_memory_info.user_physical_pages_uncommitted < page_count) return {}; @@ -778,7 +778,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge { 
VERIFY(page_count > 0); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count); m_system_memory_info.user_physical_pages_uncommitted += page_count; @@ -787,7 +787,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge void MemoryManager::deallocate_physical_page(PhysicalAddress paddr) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); // Are we returning a user page? for (auto& region : m_user_physical_regions) { @@ -839,7 +839,7 @@ RefPtr MemoryManager::find_free_user_physical_page(bool committed) NonnullRefPtr MemoryManager::allocate_committed_user_physical_page(Badge, ShouldZeroFill should_zero_fill) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); auto page = find_free_user_physical_page(true); if (should_zero_fill == ShouldZeroFill::Yes) { auto* ptr = quickmap_page(*page); @@ -851,7 +851,7 @@ NonnullRefPtr MemoryManager::allocate_committed_user_physical_page RefPtr MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); auto page = find_free_user_physical_page(false); bool purged_pages = false; @@ -893,7 +893,7 @@ RefPtr MemoryManager::allocate_user_physical_page(ShouldZeroFill s NonnullRefPtrVector MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size) { VERIFY(!(size % PAGE_SIZE)); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); size_t count = ceil_div(size, static_cast(PAGE_SIZE)); auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count); @@ -911,7 +911,7 @@ NonnullRefPtrVector MemoryManager::allocate_contiguous_supervisor_ RefPtr MemoryManager::allocate_supervisor_physical_page() { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); auto page = m_super_physical_region->take_free_page(); if (!page) { @@ -934,7 +934,7 @@ void MemoryManager::enter_space(AddressSpace& space) { auto current_thread = Thread::current(); VERIFY(current_thread != nullptr); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); current_thread->regs().cr3 = space.page_directory().cr3(); write_cr3(space.page_directory().cr3()); @@ -1006,7 +1006,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address) VERIFY_INTERRUPTS_DISABLED(); auto& mm_data = get_data(); mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock(); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE); u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE; @@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address) void MemoryManager::unquickmap_page() { VERIFY_INTERRUPTS_DISABLED(); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); auto& mm_data = get_data(); VERIFY(mm_data.m_quickmap_in_use.is_locked()); VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE); @@ -1049,20 +1049,20 @@ bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddr bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const { - ScopedSpinlock lock(space.get_lock()); + SpinlockLocker lock(space.get_lock()); return validate_user_stack_no_lock(space, vaddr); } void MemoryManager::register_region(Region& region) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); if 
(region.is_kernel()) m_kernel_regions.append(region); } void MemoryManager::unregister_region(Region& region) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); if (region.is_kernel()) m_kernel_regions.remove(region); } @@ -1077,7 +1077,7 @@ void MemoryManager::dump_kernel_regions() #endif dbgln("BEGIN{} END{} SIZE{} ACCESS NAME", addr_padding, addr_padding, addr_padding); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); for (auto& region : m_kernel_regions) { dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), @@ -1095,8 +1095,8 @@ void MemoryManager::dump_kernel_regions() void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable) { - ScopedSpinlock page_lock(kernel_page_directory().get_lock()); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker page_lock(kernel_page_directory().get_lock()); + SpinlockLocker lock(s_mm_lock); auto* pte = ensure_pte(kernel_page_directory(), vaddr); VERIFY(pte); if (pte->is_writable() == writable) diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h index 75f1ee2b5b..17c4536fad 100644 --- a/Kernel/Memory/MemoryManager.h +++ b/Kernel/Memory/MemoryManager.h @@ -197,7 +197,7 @@ public: SystemMemoryInfo get_system_memory_info() { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); return m_system_memory_info; } diff --git a/Kernel/Memory/PageDirectory.cpp b/Kernel/Memory/PageDirectory.cpp index 703b45d3ae..930cc6e7fa 100644 --- a/Kernel/Memory/PageDirectory.cpp +++ b/Kernel/Memory/PageDirectory.cpp @@ -27,7 +27,7 @@ static HashMap& cr3_map() RefPtr PageDirectory::find_by_cr3(FlatPtr cr3) { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); return cr3_map().get(cr3).value_or({}); } @@ -60,7 +60,7 @@ RefPtr PageDirectory::try_create_for_userspace(VirtualRangeAlloca } // NOTE: Take the MM lock since we need it for quickmap. - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); #if ARCH(X86_64) directory->m_pml4t = MM.allocate_user_physical_page(); @@ -159,7 +159,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory() PageDirectory::~PageDirectory() { - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker lock(s_mm_lock); if (m_space) cr3_map().remove(cr3()); } diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index 30f11cea9a..ad55d520df 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -43,8 +43,8 @@ Region::~Region() MM.unregister_region(*this); if (m_page_directory) { - ScopedSpinlock page_lock(m_page_directory->get_lock()); - ScopedSpinlock lock(s_mm_lock); + SpinlockLocker page_lock(m_page_directory->get_lock()); + SpinlockLocker lock(s_mm_lock); unmap(ShouldDeallocateVirtualRange::Yes); VERIFY(!m_page_directory); } @@ -183,7 +183,7 @@ bool Region::map_individual_page_impl(size_t page_index) } // NOTE: We have to take the MM lock for PTE's to stay valid while we use them. 
-    ScopedSpinlock mm_locker(s_mm_lock);
+    SpinlockLocker mm_locker(s_mm_lock);
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte)
@@ -208,12 +208,12 @@ bool Region::map_individual_page_impl(size_t page_index)
 bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
-    ScopedSpinlock lock(vmobject().m_lock);
+    SpinlockLocker lock(vmobject().m_lock);
     if (!m_page_directory)
         return true; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
-    ScopedSpinlock page_lock(m_page_directory->get_lock());
+    SpinlockLocker page_lock(m_page_directory->get_lock());
     VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -236,8 +236,8 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
 {
     if (!m_page_directory)
         return;
-    ScopedSpinlock page_lock(m_page_directory->get_lock());
-    ScopedSpinlock lock(s_mm_lock);
+    SpinlockLocker page_lock(m_page_directory->get_lock());
+    SpinlockLocker lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -259,8 +259,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
 bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
 {
-    ScopedSpinlock page_lock(page_directory.get_lock());
-    ScopedSpinlock lock(s_mm_lock);
+    SpinlockLocker page_lock(page_directory.get_lock());
+    SpinlockLocker lock(s_mm_lock);
     // FIXME: Find a better place for this sanity check(?)
     if (is_user() && !is_shared()) {
@@ -338,7 +338,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     auto& page_slot = physical_page_slot(page_index_in_region);
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
-    ScopedSpinlock locker(vmobject().m_lock);
+    SpinlockLocker locker(vmobject().m_lock);
     if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
         dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
@@ -401,7 +401,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
     {
-        ScopedSpinlock locker(inode_vmobject.m_lock);
+        SpinlockLocker locker(inode_vmobject.m_lock);
         if (!vmobject_physical_page_entry.is_null()) {
             dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
             if (!remap_vmobject_page(page_index_in_vmobject))
@@ -433,7 +433,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
-    ScopedSpinlock locker(inode_vmobject.m_lock);
+    SpinlockLocker locker(inode_vmobject.m_lock);
     if (!vmobject_physical_page_entry.is_null()) {
         // Someone else faulted in this page while we were reading from the inode.
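Throughout the hunks above, the renamed type is used as a plain RAII guard: constructing it acquires the spinlock, letting it fall out of scope releases it, and a few call sites (validate_syscall_preconditions, Thread::unblock_from_lock) drop it early through an explicit unlock(). For readers who want the shape spelled out, here is a minimal, self-contained sketch in standard C++; the names TinySpinlock and LockerSketch are invented for illustration, and this is not the kernel's actual Spinlock/SpinlockLocker implementation (which also deals with interrupt flags and recursive locking):

#include <atomic>

// Hypothetical stand-in for a spinlock; only lock()/unlock() matter here.
struct TinySpinlock {
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
    void lock() { while (flag.test_and_set(std::memory_order_acquire)) { } } // spin until acquired
    void unlock() { flag.clear(std::memory_order_release); }
};

// Hypothetical RAII guard mirroring how the call sites above use the locker.
template<typename LockType>
class [[nodiscard]] LockerSketch {
public:
    explicit LockerSketch(LockType& lock)
        : m_lock(lock)
    {
        m_lock.lock(); // acquire on construction
        m_locked = true;
    }
    ~LockerSketch()
    {
        if (m_locked)
            m_lock.unlock(); // release automatically on scope exit
    }
    void unlock() // for call sites that need to drop the lock early
    {
        m_lock.unlock();
        m_locked = false;
    }

private:
    LockType& m_lock;
    bool m_locked { false };
};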
diff --git a/Kernel/Memory/VMObject.h b/Kernel/Memory/VMObject.h index 60addc27ad..9531c53e98 100644 --- a/Kernel/Memory/VMObject.h +++ b/Kernel/Memory/VMObject.h @@ -43,13 +43,13 @@ public: ALWAYS_INLINE void add_region(Region& region) { - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); m_regions.append(region); } ALWAYS_INLINE void remove_region(Region& region) { - ScopedSpinlock locker(m_lock); + SpinlockLocker locker(m_lock); m_regions.remove(region); } @@ -80,7 +80,7 @@ public: template inline void VMObject::for_each_region(Callback callback) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); for (auto& region : m_regions) { callback(region); } diff --git a/Kernel/Memory/VirtualRangeAllocator.cpp b/Kernel/Memory/VirtualRangeAllocator.cpp index 351ec0cd8b..c9f1dbad29 100644 --- a/Kernel/Memory/VirtualRangeAllocator.cpp +++ b/Kernel/Memory/VirtualRangeAllocator.cpp @@ -25,7 +25,7 @@ void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t si void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator) { - ScopedSpinlock lock(parent_allocator.m_lock); + SpinlockLocker lock(parent_allocator.m_lock); m_total_range = parent_allocator.m_total_range; m_available_ranges.clear(); for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) { @@ -103,7 +103,7 @@ Optional VirtualRangeAllocator::allocate_anywhere(size_t size, siz if (Checked::addition_would_overflow(effective_size, alignment)) return {}; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) { auto& available_range = *it; @@ -142,7 +142,7 @@ Optional VirtualRangeAllocator::allocate_specific(VirtualAddress b return {}; } - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) { auto& available_range = *it; if (!available_range.contains(base, size)) @@ -159,7 +159,7 @@ Optional VirtualRangeAllocator::allocate_specific(VirtualAddress b void VirtualRangeAllocator::deallocate(VirtualRange const& range) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_total_range.contains(range)); VERIFY(range.size()); VERIFY((range.size() % PAGE_SIZE) == 0); diff --git a/Kernel/Net/Routing.cpp b/Kernel/Net/Routing.cpp index 9388334230..84d9933cf6 100644 --- a/Kernel/Net/Routing.cpp +++ b/Kernel/Net/Routing.cpp @@ -34,7 +34,7 @@ public: return false; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; m_did_unblock = true; @@ -97,7 +97,7 @@ void ARPTableBlocker::not_blocking(bool timeout_in_past) return table.get(ip_addr()); }); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!m_did_unblock) { m_did_unblock = true; m_addr = move(addr); diff --git a/Kernel/PerformanceEventBuffer.cpp b/Kernel/PerformanceEventBuffer.cpp index 003b059560..7914d07294 100644 --- a/Kernel/PerformanceEventBuffer.cpp +++ b/Kernel/PerformanceEventBuffer.cpp @@ -282,7 +282,7 @@ OwnPtr PerformanceEventBuffer::try_create_with_size(size void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type) { - ScopedSpinlock locker(process.address_space().get_lock()); + SpinlockLocker locker(process.address_space().get_lock()); String executable; if (process.executable()) diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp index 8641b710c1..6db7f71e59 100644 --- a/Kernel/Process.cpp +++ b/Kernel/Process.cpp @@ -205,7 +205,7 @@ RefPtr 
Process::create_kernel_process(RefPtr& first_thread, Str if (do_register == RegisterProcess::Yes) register_new(*process); - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); first_thread->set_affinity(affinity); first_thread->set_state(Thread::State::Runnable); return process; @@ -429,7 +429,7 @@ RefPtr Process::from_pid(ProcessID pid) const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) const { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); if (m_fds_metadatas.size() <= i) return nullptr; @@ -440,7 +440,7 @@ const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid( } Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); if (m_fds_metadatas.size() <= i) return nullptr; @@ -452,20 +452,20 @@ Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t const Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) const { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); VERIFY(m_fds_metadatas[i].is_allocated()); return m_fds_metadatas[i]; } Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); VERIFY(m_fds_metadatas[i].is_allocated()); return m_fds_metadatas[i]; } RefPtr Process::FileDescriptions::file_description(int fd) const { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); if (fd < 0) return nullptr; if (static_cast(fd) < m_fds_metadatas.size()) @@ -475,7 +475,7 @@ RefPtr Process::FileDescriptions::file_description(int fd) cons void Process::FileDescriptions::enumerate(Function callback) const { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); for (auto& file_description_metadata : m_fds_metadatas) { callback(file_description_metadata); } @@ -483,7 +483,7 @@ void Process::FileDescriptions::enumerate(Function callback) { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); for (auto& file_description_metadata : m_fds_metadatas) { callback(file_description_metadata); } @@ -501,7 +501,7 @@ size_t Process::FileDescriptions::open_count() const KResultOr Process::FileDescriptions::allocate(int first_candidate_fd) { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); for (size_t i = first_candidate_fd; i < max_open(); ++i) { if (!m_fds_metadatas[i].is_allocated()) { m_fds_metadatas[i].allocate(); @@ -771,7 +771,7 @@ RefPtr Process::create_kernel_thread(void (*entry)(void*), void* entry_d regs.set_ip((FlatPtr)entry); regs.set_sp((FlatPtr)entry_data); // entry function argument is expected to be in the SP register - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); thread->set_state(Thread::State::Runnable); return thread; } diff --git a/Kernel/Process.h b/Kernel/Process.h index 71e6979245..47e65adbbe 100644 --- a/Kernel/Process.h +++ b/Kernel/Process.h @@ -636,7 +636,7 @@ public: KResult try_clone(const Kernel::Process::FileDescriptions& other) { - ScopedSpinlock lock_other(other.m_fds_lock); + SpinlockLocker lock_other(other.m_fds_lock); if (!try_resize(other.m_fds_metadatas.size())) return ENOMEM; @@ -667,7 +667,7 @@ public: void clear() { - ScopedSpinlock lock(m_fds_lock); + SpinlockLocker lock(m_fds_lock); m_fds_metadatas.clear(); } diff --git a/Kernel/ProcessExposed.cpp b/Kernel/ProcessExposed.cpp index a234efd645..e8a9cdc218 
100644 --- a/Kernel/ProcessExposed.cpp +++ b/Kernel/ProcessExposed.cpp @@ -71,7 +71,7 @@ InodeIndex build_segmented_index_for_file_description(ProcessID pid, unsigned fd static size_t s_allocate_global_inode_index() { - ScopedSpinlock lock(s_index_lock); + SpinlockLocker lock(s_index_lock); s_next_inode_index = s_next_inode_index.value() + 1; // Note: Global ProcFS indices must be above 0 and up to maximum of what 36 bit (2 ^ 36 - 1) can represent. VERIFY(s_next_inode_index > 0); diff --git a/Kernel/ProcessSpecificExposed.cpp b/Kernel/ProcessSpecificExposed.cpp index e0a5ff05fc..dbd9a716a5 100644 --- a/Kernel/ProcessSpecificExposed.cpp +++ b/Kernel/ProcessSpecificExposed.cpp @@ -211,7 +211,7 @@ KResult Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) const { JsonArraySerializer array { builder }; { - ScopedSpinlock lock(address_space().get_lock()); + SpinlockLocker lock(address_space().get_lock()); for (auto& region : address_space().regions()) { if (!region->is_user() && !Process::current().is_superuser()) continue; diff --git a/Kernel/Random.cpp b/Kernel/Random.cpp index 3d558284e9..6f77f40873 100644 --- a/Kernel/Random.cpp +++ b/Kernel/Random.cpp @@ -70,7 +70,7 @@ UNMAP_AFTER_INIT KernelRng::KernelRng() void KernelRng::wait_for_entropy() { - ScopedSpinlock lock(get_lock()); + SpinlockLocker lock(get_lock()); if (!resource().is_ready()) { dbgln("Entropy starvation..."); m_seed_queue.wait_forever("KernelRng"); diff --git a/Kernel/Random.h b/Kernel/Random.h index 4743908599..4e62badaa7 100644 --- a/Kernel/Random.h +++ b/Kernel/Random.h @@ -37,7 +37,7 @@ public: bool get_random_bytes(u8* buffer, size_t n) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!is_ready()) return false; if (m_p0_len >= reseed_threshold) { @@ -156,7 +156,7 @@ public: void add_random_event(const T& event_data) { auto& kernel_rng = KernelRng::the(); - ScopedSpinlock lock(kernel_rng.get_lock()); + SpinlockLocker lock(kernel_rng.get_lock()); // We don't lock this because on the off chance a pool is corrupted, entropy isn't lost. Event event = { read_tsc(), m_source, event_data }; kernel_rng.resource().add_random_event(event, m_pool); diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp index 6d5c119369..8f19be9dad 100644 --- a/Kernel/Scheduler.cpp +++ b/Kernel/Scheduler.cpp @@ -227,7 +227,7 @@ bool Scheduler::pick_next() scheduler_data.in_scheduler = false; }); - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); if constexpr (SCHEDULER_RUNNABLE_DEBUG) { dump_thread_list(); @@ -347,7 +347,7 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first) // Check if we have any signals we should deliver (even if we don't // end up switching to another thread). 
 if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) {
-    ScopedSpinlock lock(current_thread->get_lock());
+    SpinlockLocker lock(current_thread->get_lock());
     if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
         current_thread->dispatch_one_pending_signal();
     }
@@ -485,7 +485,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
     }
     if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
-        ScopedSpinlock scheduler_lock(g_scheduler_lock);
+        SpinlockLocker scheduler_lock(g_scheduler_lock);
         dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
         current_thread->set_state(Thread::Dying);
         Processor::current().invoke_scheduler_async();
diff --git a/Kernel/Storage/AHCIPort.cpp b/Kernel/Storage/AHCIPort.cpp
index 5f2ca0d4ce..6e176fee0e 100644
--- a/Kernel/Storage/AHCIPort.cpp
+++ b/Kernel/Storage/AHCIPort.cpp
@@ -124,7 +124,7 @@ bool AHCIPort::is_interrupts_enabled() const
 void AHCIPort::recover_from_fatal_error()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinlock lock(m_hard_lock);
+    SpinlockLocker lock(m_hard_lock);
     dmesgln("{}: AHCI Port {} fatal error, shutting down!", m_parent_handler->hba_controller()->pci_address(), representative_port_index());
     dmesgln("{}: AHCI Port {} fatal error, SError {}", m_parent_handler->hba_controller()->pci_address(), representative_port_index(), (u32)m_port_registers.serr);
     stop_command_list_processing();
@@ -208,7 +208,7 @@ void AHCIPort::eject()
 bool AHCIPort::reset()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinlock lock(m_hard_lock);
+    SpinlockLocker lock(m_hard_lock);
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index());
@@ -233,12 +233,12 @@ bool AHCIPort::reset()
 bool AHCIPort::initialize_without_reset()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinlock lock(m_hard_lock);
+    SpinlockLocker lock(m_hard_lock);
     dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
     return initialize(lock);
 }
-bool AHCIPort::initialize(ScopedSpinlock>& main_lock)
+bool AHCIPort::initialize(SpinlockLocker>& main_lock)
 {
     VERIFY(m_lock.is_locked());
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initialization. Signature = {:#08x}", representative_port_index(), static_cast(m_port_registers.sig));
@@ -504,7 +504,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     VERIFY(is_operable());
     VERIFY(m_lock.is_locked());
     VERIFY(m_current_scatter_list);
-    ScopedSpinlock lock(m_hard_lock);
+    SpinlockLocker lock(m_hard_lock);
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {}", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ?
"write" : "read", lba, block_count); if (!spin_until_ready()) @@ -591,7 +591,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64 return true; } -bool AHCIPort::identify_device(ScopedSpinlock>& main_lock) +bool AHCIPort::identify_device(SpinlockLocker>& main_lock) { VERIFY(m_lock.is_locked()); VERIFY(is_operable()); @@ -654,7 +654,7 @@ bool AHCIPort::identify_device(ScopedSpinlock>& main_lock) bool AHCIPort::shutdown() { MutexLocker locker(m_lock); - ScopedSpinlock lock(m_hard_lock); + SpinlockLocker lock(m_hard_lock); rebase(); set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface); return true; @@ -740,7 +740,7 @@ void AHCIPort::stop_fis_receiving() const m_port_registers.cmd = m_port_registers.cmd & 0xFFFFFFEF; } -bool AHCIPort::initiate_sata_reset(ScopedSpinlock>& main_lock) +bool AHCIPort::initiate_sata_reset(SpinlockLocker>& main_lock) { VERIFY(m_lock.is_locked()); VERIFY(m_hard_lock.is_locked()); diff --git a/Kernel/Storage/AHCIPort.h b/Kernel/Storage/AHCIPort.h index e0b844434f..4da795dce6 100644 --- a/Kernel/Storage/AHCIPort.h +++ b/Kernel/Storage/AHCIPort.h @@ -51,7 +51,7 @@ public: private: bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; } - bool initialize(ScopedSpinlock>&); + bool initialize(SpinlockLocker>&); UNMAP_AFTER_INIT AHCIPort(const AHCIPortHandler&, volatile AHCI::PortRegisters&, u32 port_index); @@ -62,7 +62,7 @@ private: const char* try_disambiguate_sata_status(); void try_disambiguate_sata_error(); - bool initiate_sata_reset(ScopedSpinlock>&); + bool initiate_sata_reset(SpinlockLocker>&); void rebase(); void recover_from_fatal_error(); bool shutdown(); @@ -79,7 +79,7 @@ private: bool spin_until_ready() const; - bool identify_device(ScopedSpinlock>&); + bool identify_device(SpinlockLocker>&); ALWAYS_INLINE void start_command_list_processing() const; ALWAYS_INLINE void mark_command_header_ready_to_process(u8 command_header_index) const; diff --git a/Kernel/Storage/BMIDEChannel.cpp b/Kernel/Storage/BMIDEChannel.cpp index 86082be77a..3a8a85125d 100644 --- a/Kernel/Storage/BMIDEChannel.cpp +++ b/Kernel/Storage/BMIDEChannel.cpp @@ -80,7 +80,7 @@ bool BMIDEChannel::handle_irq(const RegisterState&) // clear bus master interrupt status m_io_group.bus_master_base().value().offset(2).out(m_io_group.bus_master_base().value().offset(2).in() | 4); - ScopedSpinlock lock(m_request_lock); + SpinlockLocker lock(m_request_lock); dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}", (status & ATA_SR_DRQ) != 0, (status & ATA_SR_BSY) != 0, @@ -116,7 +116,7 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re // before Processor::deferred_call_queue returns! 
g_io_work->queue([this, result]() { dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result); - ScopedSpinlock lock(m_request_lock); + SpinlockLocker lock(m_request_lock); VERIFY(m_current_request); auto current_request = m_current_request; m_current_request.clear(); @@ -146,7 +146,7 @@ void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities) VERIFY(!m_current_request.is_null()); VERIFY(m_current_request->block_count() <= 256); - ScopedSpinlock m_lock(m_request_lock); + SpinlockLocker m_lock(m_request_lock); dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count()); prdt().offset = m_dma_buffer_page->paddr().get(); @@ -194,7 +194,7 @@ void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities) VERIFY(!m_current_request.is_null()); VERIFY(m_current_request->block_count() <= 256); - ScopedSpinlock m_lock(m_request_lock); + SpinlockLocker m_lock(m_request_lock); dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count()); // Note: This is a fix for a quirk for an IDE controller on ICH7 machine. diff --git a/Kernel/Storage/IDEChannel.cpp b/Kernel/Storage/IDEChannel.cpp index 1a98b88a78..b5933b7a60 100644 --- a/Kernel/Storage/IDEChannel.cpp +++ b/Kernel/Storage/IDEChannel.cpp @@ -197,7 +197,7 @@ bool IDEChannel::handle_irq(const RegisterState&) m_entropy_source.add_random_event(status); - ScopedSpinlock lock(m_request_lock); + SpinlockLocker lock(m_request_lock); dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}", (status & ATA_SR_DRQ) != 0, (status & ATA_SR_BSY) != 0, @@ -223,7 +223,7 @@ bool IDEChannel::handle_irq(const RegisterState&) // trigger page faults g_io_work->queue([this]() { MutexLocker locker(m_lock); - ScopedSpinlock lock(m_request_lock); + SpinlockLocker lock(m_request_lock); if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) { dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count()); @@ -498,7 +498,7 @@ void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities) VERIFY(!m_current_request.is_null()); VERIFY(m_current_request->block_count() <= 256); - ScopedSpinlock m_lock(m_request_lock); + SpinlockLocker m_lock(m_request_lock); dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors"); dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index()); ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities); @@ -536,7 +536,7 @@ void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities) VERIFY(!m_current_request.is_null()); VERIFY(m_current_request->block_count() <= 256); - ScopedSpinlock m_lock(m_request_lock); + SpinlockLocker m_lock(m_request_lock); dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index()); ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities); ata_do_write_sector(); diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp index d5d2f2ff52..bd7ed2b706 100644 --- a/Kernel/Syscalls/execve.cpp +++ b/Kernel/Syscalls/execve.cpp @@ -682,7 +682,7 @@ KResult Process::do_exec(NonnullRefPtr main_program_description } { - ScopedSpinlock 
lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); new_main_thread->set_state(Thread::State::Runnable); } u32 lock_count_to_restore; diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp index 2ae0d17eae..bc4af4b294 100644 --- a/Kernel/Syscalls/fork.cpp +++ b/Kernel/Syscalls/fork.cpp @@ -93,7 +93,7 @@ KResultOr Process::sys$fork(RegisterState& regs) #endif { - ScopedSpinlock lock(address_space().get_lock()); + SpinlockLocker lock(address_space().get_lock()); for (auto& region : address_space().regions()) { dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr()); auto maybe_region_clone = region->try_clone(); @@ -120,7 +120,7 @@ KResultOr Process::sys$fork(RegisterState& regs) PerformanceManager::add_process_created_event(*child); - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); child_first_thread->set_affinity(Thread::current()->affinity()); child_first_thread->set_state(Thread::State::Runnable); diff --git a/Kernel/Syscalls/futex.cpp b/Kernel/Syscalls/futex.cpp index b945ed302a..2669f78784 100644 --- a/Kernel/Syscalls/futex.cpp +++ b/Kernel/Syscalls/futex.cpp @@ -13,7 +13,7 @@ namespace Kernel { void Process::clear_futex_queues_on_exec() { - ScopedSpinlock lock(m_futex_lock); + SpinlockLocker lock(m_futex_lock); for (auto& it : m_futex_queues) { bool did_wake_all; it.value->wake_all(did_wake_all); @@ -88,7 +88,7 @@ KResultOr Process::sys$futex(Userspace auto do_wake = [&](FlatPtr user_address, u32 count, Optional bitmask) -> int { if (count == 0) return 0; - ScopedSpinlock locker(m_futex_lock); + SpinlockLocker locker(m_futex_lock); auto futex_queue = find_futex_queue(user_address, false); if (!futex_queue) return 0; @@ -117,7 +117,7 @@ KResultOr Process::sys$futex(Userspace } atomic_thread_fence(AK::MemoryOrder::memory_order_acquire); - ScopedSpinlock locker(m_futex_lock); + SpinlockLocker locker(m_futex_lock); did_create = false; futex_queue = find_futex_queue(user_address, true, &did_create); VERIFY(futex_queue); @@ -130,7 +130,7 @@ KResultOr Process::sys$futex(Userspace Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset); - ScopedSpinlock locker(m_futex_lock); + SpinlockLocker locker(m_futex_lock); if (futex_queue->is_empty_and_no_imminent_waits()) { // If there are no more waiters, we want to get rid of the futex! 
remove_futex_queue(user_address); @@ -150,7 +150,7 @@ KResultOr Process::sys$futex(Userspace atomic_thread_fence(AK::MemoryOrder::memory_order_acquire); int woken_or_requeued = 0; - ScopedSpinlock locker(m_futex_lock); + SpinlockLocker locker(m_futex_lock); if (auto futex_queue = find_futex_queue(user_address, false)) { RefPtr target_futex_queue; bool is_empty, is_target_empty; diff --git a/Kernel/Syscalls/profiling.cpp b/Kernel/Syscalls/profiling.cpp index a2b932e85e..3580ba8910 100644 --- a/Kernel/Syscalls/profiling.cpp +++ b/Kernel/Syscalls/profiling.cpp @@ -31,7 +31,7 @@ KResultOr Process::sys$profiling_enable(pid_t pid, u64 event_mask) else g_global_perf_events = PerformanceEventBuffer::try_create_with_size(32 * MiB).leak_ptr(); - ScopedSpinlock lock(g_profiling_lock); + SpinlockLocker lock(g_profiling_lock); if (!TimeManagement::the().enable_profile_timer()) return ENOTSUP; g_profiling_all_threads = true; @@ -51,7 +51,7 @@ KResultOr Process::sys$profiling_enable(pid_t pid, u64 event_mask) return ESRCH; if (!is_superuser() && process->uid() != euid()) return EPERM; - ScopedSpinlock lock(g_profiling_lock); + SpinlockLocker lock(g_profiling_lock); g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP; process->set_profiling(true); if (!process->create_perf_events_buffer_if_needed()) { @@ -86,7 +86,7 @@ KResultOr Process::sys$profiling_disable(pid_t pid) return ESRCH; if (!is_superuser() && process->uid() != euid()) return EPERM; - ScopedSpinlock lock(g_profiling_lock); + SpinlockLocker lock(g_profiling_lock); if (!process->is_profiling()) return EINVAL; // FIXME: If we enabled the profile timer and it's not supported, how do we disable it now? @@ -122,7 +122,7 @@ KResultOr Process::sys$profiling_free_buffer(pid_t pid) return ESRCH; if (!is_superuser() && process->uid() != euid()) return EPERM; - ScopedSpinlock lock(g_profiling_lock); + SpinlockLocker lock(g_profiling_lock); if (process->is_profiling()) return EINVAL; process->delete_perf_events_buffer(); diff --git a/Kernel/Syscalls/ptrace.cpp b/Kernel/Syscalls/ptrace.cpp index 2e951502f0..d7be01114e 100644 --- a/Kernel/Syscalls/ptrace.cpp +++ b/Kernel/Syscalls/ptrace.cpp @@ -18,7 +18,7 @@ namespace Kernel { static KResultOr handle_ptrace(const Kernel::Syscall::SC_ptrace_params& params, Process& caller) { - ScopedSpinlock scheduler_lock(g_scheduler_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); if (params.request == PT_TRACE_ME) { if (Process::current().tracer()) return EBUSY; @@ -55,7 +55,7 @@ static KResultOr handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par auto result = peer_process.start_tracing_from(caller.pid()); if (result.is_error()) return result.error(); - ScopedSpinlock lock(peer->get_lock()); + SpinlockLocker lock(peer->get_lock()); if (peer->state() != Thread::State::Stopped) { peer->send_signal(SIGSTOP, &caller); } diff --git a/Kernel/Syscalls/sched.cpp b/Kernel/Syscalls/sched.cpp index cffd1e2c55..0d9ff0a3bc 100644 --- a/Kernel/Syscalls/sched.cpp +++ b/Kernel/Syscalls/sched.cpp @@ -28,7 +28,7 @@ KResultOr Process::sys$sched_setparam(int pid, Userspace Process::sys$sched_getparam(pid_t pid, Userspace Process::sys$create_thread(void* (*entry)(void*), Userspaceset_priority(requested_thread_priority); thread->set_state(Thread::State::Runnable); return thread->tid().value(); @@ -207,7 +207,7 @@ KResultOr Process::sys$get_thread_name(pid_t tid, Userspace buff if (!thread || thread->pid() != pid()) return ESRCH; - ScopedSpinlock locker(thread->get_lock()); + 
SpinlockLocker locker(thread->get_lock()); auto thread_name = thread->name(); if (thread_name.is_null()) { diff --git a/Kernel/TTY/ConsoleManagement.cpp b/Kernel/TTY/ConsoleManagement.cpp index 02a6a21e67..038fba66d4 100644 --- a/Kernel/TTY/ConsoleManagement.cpp +++ b/Kernel/TTY/ConsoleManagement.cpp @@ -59,7 +59,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize() PANIC("Switch to tty value is invalid: {} ", tty_number); } m_active_console = &m_consoles[tty_number]; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_active_console->set_active(true); if (!m_active_console->is_graphical()) m_active_console->clear(); @@ -67,7 +67,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize() void ConsoleManagement::switch_to(unsigned index) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_active_console); VERIFY(index < m_consoles.size()); if (m_active_console->index() == index) diff --git a/Kernel/TTY/VirtualConsole.cpp b/Kernel/TTY/VirtualConsole.cpp index 48fcc79a31..410fbe9117 100644 --- a/Kernel/TTY/VirtualConsole.cpp +++ b/Kernel/TTY/VirtualConsole.cpp @@ -259,7 +259,7 @@ void VirtualConsole::on_key_pressed(KeyEvent event) KResultOr VirtualConsole::on_tty_write(const UserOrKernelBuffer& data, size_t size) { - ScopedSpinlock global_lock(ConsoleManagement::the().tty_write_lock()); + SpinlockLocker global_lock(ConsoleManagement::the().tty_write_lock()); auto result = data.read_buffered<512>(size, [&](u8 const* buffer, size_t buffer_bytes) { for (size_t i = 0; i < buffer_bytes; ++i) m_console_impl.on_input(buffer[i]); diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index 0acd8f03c5..08bdd31f04 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -147,7 +147,7 @@ Thread::~Thread() // Specifically, if this is the last thread of a process, checking // block conditions would access m_process, which would be in // the middle of being destroyed. 
-    ScopedSpinlock lock(g_scheduler_lock);
+    SpinlockLocker lock(g_scheduler_lock);
     VERIFY(!m_process_thread_list_node.is_in_list()); // We shouldn't be queued
@@ -155,16 +155,16 @@ Thread::~Thread()
     }
 }
-void Thread::block(Kernel::Mutex& lock, ScopedSpinlock>& lock_lock, u32 lock_count)
+void Thread::block(Kernel::Mutex& lock, SpinlockLocker>& lock_lock, u32 lock_count)
 {
     VERIFY(!Processor::current().in_irq());
     VERIFY(this == Thread::current());
     ScopedCritical critical;
     VERIFY(!Memory::s_mm_lock.own_lock());
-    ScopedSpinlock block_lock(m_block_lock);
+    SpinlockLocker block_lock(m_block_lock);
-    ScopedSpinlock scheduler_lock(g_scheduler_lock);
+    SpinlockLocker scheduler_lock(g_scheduler_lock);
     switch (state()) {
     case Thread::Stopped:
@@ -212,7 +212,7 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinlock>& lock_lock,
     }
     VERIFY(Processor::in_critical());
-    ScopedSpinlock block_lock2(m_block_lock);
+    SpinlockLocker block_lock2(m_block_lock);
     if (should_be_stopped() || state() == Stopped) {
         dbgln("Thread should be stopped, current state: {}", state_string());
         set_state(Thread::Blocked);
@@ -229,14 +229,14 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinlock>& lock_lock,
 u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
 {
-    ScopedSpinlock block_lock(m_block_lock);
+    SpinlockLocker block_lock(m_block_lock);
     VERIFY(m_blocking_lock == &lock);
     auto requested_count = m_lock_requested_count;
     block_lock.unlock();
     auto do_unblock = [&]() {
-        ScopedSpinlock scheduler_lock(g_scheduler_lock);
-        ScopedSpinlock block_lock(m_block_lock);
+        SpinlockLocker scheduler_lock(g_scheduler_lock);
+        SpinlockLocker block_lock(m_block_lock);
         VERIFY(m_blocking_lock == &lock);
         VERIFY(!Processor::current().in_irq());
         VERIFY(g_scheduler_lock.own_lock());
@@ -265,8 +265,8 @@ u32 Thread::unblock_from_lock(Kernel::Mutex& lock)
 void Thread::unblock_from_blocker(Blocker& blocker)
 {
     auto do_unblock = [&]() {
-        ScopedSpinlock scheduler_lock(g_scheduler_lock);
-        ScopedSpinlock block_lock(m_block_lock);
+        SpinlockLocker scheduler_lock(g_scheduler_lock);
+        SpinlockLocker block_lock(m_block_lock);
         if (m_blocker != &blocker)
             return;
         if (!should_be_stopped() && !is_stopped())
@@ -322,7 +322,7 @@ void Thread::set_should_die()
     // Remember that we should die instead of returning to
     // the userspace.
-    ScopedSpinlock lock(g_scheduler_lock);
+    SpinlockLocker lock(g_scheduler_lock);
     m_should_die = true;
     // NOTE: Even the current thread can technically be in "Stopped"
@@ -337,7 +337,7 @@ void Thread::set_should_die()
         resume_from_stopped();
     }
     if (is_blocked()) {
-        ScopedSpinlock block_lock(m_block_lock);
+        SpinlockLocker block_lock(m_block_lock);
         if (m_blocker) {
             // We're blocked in the kernel.
m_blocker->set_interrupted_by_death(); @@ -359,7 +359,7 @@ void Thread::die_if_needed() dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this); { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); // It's possible that we don't reach the code after this block if the // scheduler is invoked and FinalizerTask cleans up this thread, however // that doesn't matter because we're trying to invoke the scheduler anyway @@ -478,7 +478,7 @@ StringView Thread::state_string() const case Thread::Stopped: return "Stopped"sv; case Thread::Blocked: { - ScopedSpinlock block_lock(m_block_lock); + SpinlockLocker block_lock(m_block_lock); if (m_blocking_lock) return "Mutex"sv; if (m_blocker) @@ -498,7 +498,7 @@ void Thread::finalize() VERIFY(!m_lock.own_lock()); if (lock_count() > 0) { dbgln("Thread {} leaking {} Locks!", *this, lock_count()); - ScopedSpinlock list_lock(m_holding_locks_lock); + SpinlockLocker list_lock(m_holding_locks_lock); for (auto& info : m_holding_locks_list) { const auto& location = info.lock_location; dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count); @@ -508,7 +508,7 @@ void Thread::finalize() #endif { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this); set_state(Thread::State::Dead); m_join_condition.thread_finalizing(); @@ -533,7 +533,7 @@ void Thread::finalize_dying_threads() VERIFY(Thread::current() == g_finalizer); Vector dying_threads; { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); for_each_in_state(Thread::State::Dying, [&](Thread& thread) { if (thread.is_finalizable()) dying_threads.append(&thread); @@ -566,7 +566,7 @@ void Thread::update_time_scheduled(u64 current_scheduler_time, bool is_kernel, b Scheduler::add_time_scheduled(delta, is_kernel); auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user; - ScopedSpinlock scheduler_lock(g_scheduler_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); total_time += delta; } } @@ -592,9 +592,9 @@ void Thread::check_dispatch_pending_signal() { auto result = DispatchSignalResult::Continue; { - ScopedSpinlock scheduler_lock(g_scheduler_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); if (pending_signals_for_state()) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); result = dispatch_one_pending_signal(); } } @@ -610,7 +610,7 @@ void Thread::check_dispatch_pending_signal() u32 Thread::pending_signals() const { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); return pending_signals_for_state(); } @@ -626,7 +626,7 @@ u32 Thread::pending_signals_for_state() const void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender) { VERIFY(signal < 32); - ScopedSpinlock scheduler_lock(g_scheduler_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); // FIXME: Figure out what to do for masked signals. Should we also ignore them here? 
if (should_ignore_signal(signal)) { @@ -645,13 +645,13 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender) m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release); if (m_state == Stopped) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (pending_signals_for_state()) { dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal); resume_from_stopped(); } } else { - ScopedSpinlock block_lock(m_block_lock); + SpinlockLocker block_lock(m_block_lock); dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal); unblock(signal); } @@ -659,7 +659,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender) u32 Thread::update_signal_mask(u32 signal_mask) { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); auto previous_signal_mask = m_signal_mask; m_signal_mask = signal_mask; m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release); @@ -668,13 +668,13 @@ u32 Thread::update_signal_mask(u32 signal_mask) u32 Thread::signal_mask() const { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); return m_signal_mask; } u32 Thread::signal_mask_block(sigset_t signal_set, bool block) { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); auto previous_signal_mask = m_signal_mask; if (block) m_signal_mask &= ~signal_set; @@ -686,7 +686,7 @@ u32 Thread::signal_mask_block(sigset_t signal_set, bool block) void Thread::clear_signals() { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); m_signal_mask = 0; m_pending_signals = 0; m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release); @@ -704,7 +704,7 @@ void Thread::send_urgent_signal_to_self(u8 signal) VERIFY(Thread::current() == this); DispatchSignalResult result; { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); result = dispatch_signal(signal); } if (result == DispatchSignalResult::Yield) @@ -730,8 +730,8 @@ DispatchSignalResult Thread::dispatch_one_pending_signal() DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal) { VERIFY(signal != 0); - ScopedSpinlock scheduler_lock(g_scheduler_lock); - ScopedSpinlock lock(m_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); + SpinlockLocker lock(m_lock); u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask; if (!(signal_candidates & (1 << (signal - 1)))) return DispatchSignalResult::Continue; @@ -821,7 +821,7 @@ void Thread::resume_from_stopped() VERIFY(m_stop_state != State::Invalid); VERIFY(g_scheduler_lock.own_lock()); if (m_stop_state == Blocked) { - ScopedSpinlock block_lock(m_block_lock); + SpinlockLocker block_lock(m_block_lock); if (m_blocker || m_blocking_lock) { // Hasn't been unblocked yet set_state(Blocked, 0); @@ -1055,7 +1055,7 @@ void Thread::set_state(State new_state, u8 stop_signal) return; { - ScopedSpinlock thread_lock(m_lock); + SpinlockLocker thread_lock(m_lock); previous_state = m_state; if (previous_state == Invalid) { // If we were *just* created, we may have already pending signals diff --git a/Kernel/Thread.h b/Kernel/Thread.h index beeb91103f..3143186680 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -177,13 +177,13 @@ public: void detach() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_is_joinable = false; } [[nodiscard]] bool 
is_joinable() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return m_is_joinable; } @@ -200,7 +200,7 @@ public: void set_name(OwnPtr name) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_name = move(name); } @@ -309,28 +309,28 @@ public: virtual void was_unblocked(bool did_timeout) { if (did_timeout) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); m_did_timeout = true; } } void set_interrupted_by_death() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_set_interrupted_by_death(); } void set_interrupted_by_signal(u8 signal) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); do_set_interrupted_by_signal(signal); } u8 was_interrupted_by_signal() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_get_interrupted_by_signal(); } virtual Thread::BlockResult block_result() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_was_interrupted_by_death) return Thread::BlockResult::InterruptedByDeath; if (m_was_interrupted_by_signal != 0) @@ -370,7 +370,7 @@ public: RefPtr thread; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_is_blocking) { m_is_blocking = false; VERIFY(m_blocked_thread); @@ -409,13 +409,13 @@ public: virtual ~BlockCondition() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_blockers.is_empty()); } bool add_blocker(Blocker& blocker, void* data) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!should_add_blocker(blocker, data)) return false; m_blockers.append({ &blocker, data }); @@ -424,7 +424,7 @@ public: void remove_blocker(Blocker& blocker, void* data) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // NOTE: it's possible that the blocker is no longer present m_blockers.remove_first_matching([&](auto& info) { return info.blocker == &blocker && info.data == data; @@ -433,7 +433,7 @@ public: bool is_empty() const { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return is_empty_locked(); } @@ -441,7 +441,7 @@ public: template bool unblock(UnblockOne unblock_one) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); return do_unblock(unblock_one); } @@ -785,7 +785,7 @@ public: if (Thread::current() == this) return EDEADLK; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!m_is_joinable || state() == Dead) return EINVAL; @@ -808,7 +808,7 @@ public: [[nodiscard]] bool is_blocked() const { return m_state == Blocked; } [[nodiscard]] bool is_in_block() const { - ScopedSpinlock lock(m_block_lock); + SpinlockLocker lock(m_block_lock); return m_in_block; } @@ -841,7 +841,7 @@ public: // tick or entering the next system call, or if it's in kernel // mode then we will intercept prior to returning back to user // mode. - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); while (state() == Thread::Stopped) { lock.unlock(); // We shouldn't be holding the big lock here @@ -850,7 +850,7 @@ public: } } - void block(Kernel::Mutex&, ScopedSpinlock>&, u32); + void block(Kernel::Mutex&, SpinlockLocker>&, u32); template [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... 
args) @@ -860,13 +860,13 @@ public: ScopedCritical critical; VERIFY(!Memory::s_mm_lock.own_lock()); - ScopedSpinlock block_lock(m_block_lock); + SpinlockLocker block_lock(m_block_lock); // We need to hold m_block_lock so that nobody can unblock a blocker as soon // as it is constructed and registered elsewhere m_in_block = true; BlockerType blocker(forward(args)...); - ScopedSpinlock scheduler_lock(g_scheduler_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); // Relaxed semantics are fine for timeout_unblocked because we // synchronize on the spin locks already. Atomic timeout_unblocked(false); @@ -901,8 +901,8 @@ public: VERIFY(!g_scheduler_lock.own_lock()); VERIFY(!m_block_lock.own_lock()); // NOTE: this may execute on the same or any other processor! - ScopedSpinlock scheduler_lock(g_scheduler_lock); - ScopedSpinlock block_lock(m_block_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); + SpinlockLocker block_lock(m_block_lock); if (m_blocker && timeout_unblocked.exchange(true) == false) unblock(); }); @@ -934,7 +934,7 @@ public: yield_without_releasing_big_lock(); VERIFY(Processor::in_critical()); - ScopedSpinlock block_lock2(m_block_lock); + SpinlockLocker block_lock2(m_block_lock); if (should_be_stopped() || state() == Stopped) { dbgln("Thread should be stopped, current state: {}", state_string()); set_state(Thread::Blocked); @@ -960,8 +960,8 @@ public: } if (blocker.was_interrupted_by_signal()) { - ScopedSpinlock scheduler_lock(g_scheduler_lock); - ScopedSpinlock lock(m_lock); + SpinlockLocker scheduler_lock(g_scheduler_lock); + SpinlockLocker lock(m_lock); dispatch_one_pending_signal(); } @@ -1120,7 +1120,7 @@ public: // We can't finalize until the thread is either detached or // a join has started. We can't make m_is_joinable atomic // because that would introduce a race in try_join. 
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     return !m_is_joinable;
 }
@@ -1165,7 +1165,7 @@ public:
     {
         VERIFY(refs_delta != 0);
         m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
-        ScopedSpinlock list_lock(m_holding_locks_lock);
+        SpinlockLocker list_lock(m_holding_locks_lock);
         if (refs_delta > 0) {
             bool have_existing = false;
             for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
@@ -1236,7 +1236,7 @@ private:
 public:
     void thread_did_exit(void* exit_value)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         VERIFY(!m_thread_did_exit);
         m_thread_did_exit = true;
         m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
@@ -1244,7 +1244,7 @@ private:
     }
     void thread_finalizing()
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         do_unblock_joiner();
     }
     void* exit_value() const
@@ -1255,7 +1255,7 @@ private:
     void try_unblock(JoinBlocker& blocker)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         if (m_thread_did_exit)
             blocker.unblock(exit_value(), false);
     }
diff --git a/Kernel/ThreadBlockers.cpp b/Kernel/ThreadBlockers.cpp
index 67399c66c2..0bbde36432 100644
--- a/Kernel/ThreadBlockers.cpp
+++ b/Kernel/ThreadBlockers.cpp
@@ -41,14 +41,14 @@ bool Thread::Blocker::set_block_condition(Thread::BlockCondition& block_conditio
 Thread::Blocker::~Blocker()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     if (m_block_condition)
         m_block_condition->remove_blocker(*this, m_block_data);
 }
 void Thread::Blocker::begin_blocking(Badge)
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     VERIFY(!m_is_blocking);
     VERIFY(!m_blocked_thread);
     m_blocked_thread = Thread::current();
@@ -57,7 +57,7 @@ void Thread::Blocker::begin_blocking(Badge)
 auto Thread::Blocker::end_blocking(Badge, bool did_timeout) -> BlockResult
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     // if m_is_blocking is false here, some thread forced to
     // unblock us when we get here. This is only called from the
     // thread that was blocked.
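The Blocker changes below (JoinBlocker, QueueBlocker, FutexBlocker, FileDescriptionBlocker, SelectBlocker, WaitBlocker) broadly share one unblock-once shape: the blocker's own spinlock guards an m_did_unblock flag, so whichever of the competing paths (condition satisfied, signal, timeout, disown) takes the lock first wins, and later callers return early. A condensed, hypothetical illustration of that shape, reusing the TinySpinlock/LockerSketch types from the earlier sketch; this is not actual kernel code:

class UnblockOnceSketch {
public:
    bool unblock()
    {
        LockerSketch locker(m_lock);
        if (m_did_unblock)
            return false; // another path already won the race
        m_did_unblock = true;
        // ... record the result and wake the blocked thread ...
        return true;
    }

private:
    TinySpinlock m_lock;
    bool m_did_unblock { false };
};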
@@ -76,7 +76,7 @@ Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void* { // We need to hold our lock to avoid a race where try_join succeeds // but the joinee is joining immediately - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); try_join_result = joinee.try_join([&]() { if (!set_block_condition(joinee.m_join_condition)) m_should_block = false; @@ -105,7 +105,7 @@ void Thread::JoinBlocker::not_blocking(bool timeout_in_past) bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; m_did_unblock = true; @@ -132,7 +132,7 @@ Thread::QueueBlocker::~QueueBlocker() bool Thread::QueueBlocker::unblock() { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; m_did_unblock = true; @@ -164,7 +164,7 @@ void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue) bool Thread::FutexBlocker::unblock_bitset(u32 bitset) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock || (bitset != FUTEX_BITSET_MATCH_ANY && (m_bitset & bitset) == 0)) return false; @@ -178,7 +178,7 @@ bool Thread::FutexBlocker::unblock_bitset(u32 bitset) bool Thread::FutexBlocker::unblock(bool force) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return force; m_did_unblock = true; @@ -205,7 +205,7 @@ bool Thread::FileDescriptionBlocker::unblock(bool from_add_blocker, void*) return false; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; m_did_unblock = true; @@ -364,7 +364,7 @@ void Thread::SelectBlocker::not_blocking(bool timeout_in_past) { // Either the timeout was in the past or we didn't add all blockers VERIFY(timeout_in_past || !m_should_block); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (!m_should_block || !m_did_unblock) { m_did_unblock = true; if (!timeout_in_past) { @@ -380,7 +380,7 @@ bool Thread::SelectBlocker::unblock(bool from_add_blocker, void* data) auto& fd_info = *static_cast(data); { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; @@ -425,7 +425,7 @@ void Thread::SelectBlocker::was_unblocked(bool did_timeout) Blocker::was_unblocked(did_timeout); if (!did_timeout && !was_interrupted()) { { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(m_did_unblock); } size_t count = collect_unblocked_flags(); @@ -447,7 +447,7 @@ Thread::WaitBlockCondition::ProcessBlockInfo::~ProcessBlockInfo() void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // We if we have any processes pending for (size_t i = 0; i < m_processes.size(); i++) { auto& info = m_processes[i]; @@ -472,7 +472,7 @@ void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker) void Thread::WaitBlockCondition::disowned_by_waiter(Process& process) { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_finalized) return; for (size_t i = 0; i < m_processes.size();) { @@ -502,7 +502,7 @@ bool Thread::WaitBlockCondition::unblock(Process& process, WaitBlocker::UnblockF bool did_wait = false; bool was_waited_already = false; - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_finalized) return false; if (flags != WaitBlocker::UnblockFlags::Terminated) { @@ -573,7 +573,7 @@ bool Thread::WaitBlockCondition::should_add_blocker(Blocker& b, void*) 
void Thread::WaitBlockCondition::finalize() { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); VERIFY(!m_finalized); m_finalized = true; @@ -637,7 +637,7 @@ void Thread::WaitBlocker::was_unblocked(bool) { bool got_sigchld, try_unblock; { - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); try_unblock = !m_did_unblock; got_sigchld = m_got_sigchild; } @@ -720,7 +720,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa return false; break; case UnblockFlags::Disowned: - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); // Disowning must unblock anyone waiting for this process explicitly if (!m_did_unblock) do_was_disowned(); @@ -730,7 +730,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa if (flags == UnblockFlags::Terminated) { VERIFY(process.is_dead()); - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; // Up until this point, this function may have been called @@ -739,7 +739,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa } else { siginfo_t siginfo {}; { - ScopedSpinlock lock(g_scheduler_lock); + SpinlockLocker lock(g_scheduler_lock); // We need to gather the information before we release the scheduler lock! siginfo.si_signo = SIGCHLD; siginfo.si_pid = process.pid().value(); @@ -759,7 +759,7 @@ bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signa } } - ScopedSpinlock lock(m_lock); + SpinlockLocker lock(m_lock); if (m_did_unblock) return false; // Up until this point, this function may have been called diff --git a/Kernel/TimerQueue.cpp b/Kernel/TimerQueue.cpp index f168057165..f2217d4f66 100644 --- a/Kernel/TimerQueue.cpp +++ b/Kernel/TimerQueue.cpp @@ -67,7 +67,7 @@ bool TimerQueue::add_timer_without_id(NonnullRefPtr timer, clockid_t cloc // returning from the timer handler and a call to cancel_timer(). timer->setup(clock_id, deadline, move(callback)); - ScopedSpinlock lock(g_timerqueue_lock); + SpinlockLocker lock(g_timerqueue_lock); timer->m_id = 0; // Don't generate a timer id add_timer_locked(move(timer)); return true; @@ -75,7 +75,7 @@ bool TimerQueue::add_timer_without_id(NonnullRefPtr timer, clockid_t cloc TimerId TimerQueue::add_timer(NonnullRefPtr&& timer) { - ScopedSpinlock lock(g_timerqueue_lock); + SpinlockLocker lock(g_timerqueue_lock); timer->m_id = ++m_timer_id_count; VERIFY(timer->m_id != 0); // wrapped @@ -130,7 +130,7 @@ bool TimerQueue::cancel_timer(TimerId id) Timer* found_timer = nullptr; Queue* timer_queue = nullptr; - ScopedSpinlock lock(g_timerqueue_lock); + SpinlockLocker lock(g_timerqueue_lock); for (auto& timer : m_timer_queue_monotonic.list) { if (timer.m_id == id) { found_timer = &timer; @@ -207,7 +207,7 @@ bool TimerQueue::cancel_timer(Timer& timer, bool* was_in_use) if (!did_already_run) { timer.clear_in_use(); - ScopedSpinlock lock(g_timerqueue_lock); + SpinlockLocker lock(g_timerqueue_lock); if (timer_queue.list.contains(timer)) { // The timer has not fired, remove it VERIFY(timer.ref_count() > 1); @@ -251,7 +251,7 @@ void TimerQueue::remove_timer_locked(Queue& queue, Timer& timer) void TimerQueue::fire() { - ScopedSpinlock lock(g_timerqueue_lock); + SpinlockLocker lock(g_timerqueue_lock); auto fire_timers = [&](Queue& queue) { auto* timer = queue.list.first(); @@ -274,7 +274,7 @@ void TimerQueue::fire() // our reference and don't execute the callback. 
         if (!timer->set_cancelled()) {
             timer->m_callback();
-            ScopedSpinlock lock(g_timerqueue_lock);
+            SpinlockLocker lock(g_timerqueue_lock);
             m_timers_executing.remove(*timer);
         }
         timer->clear_in_use();
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index ce8d7c7c5b..6d0ae360e1 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -27,7 +27,7 @@ bool WaitQueue::should_add_blocker(Thread::Blocker& b, void* data)
 u32 WaitQueue::wake_one()
 {
     u32 did_wake = 0;
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_one", this);
     bool did_unblock_one = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
         VERIFY(data);
@@ -50,7 +50,7 @@ u32 WaitQueue::wake_n(u32 wake_count)
 {
     if (wake_count == 0)
         return 0; // should we assert instead?
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_n({})", this, wake_count);
     u32 did_wake = 0;
@@ -74,7 +74,7 @@ u32 WaitQueue::wake_n(u32 wake_count)
 u32 WaitQueue::wake_all()
 {
-    ScopedSpinlock lock(m_lock);
+    SpinlockLocker lock(m_lock);
     dbgln_if(WAITQUEUE_DEBUG, "WaitQueue @ {}: wake_all", this);
     u32 did_wake = 0;
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index 93cc1782fd..6e8891128e 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -20,7 +20,7 @@ public:
     void should_block(bool block)
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         m_should_block = block;
     }
diff --git a/Kernel/WorkQueue.cpp b/Kernel/WorkQueue.cpp
index bd44baacc7..4fa8c735a0 100644
--- a/Kernel/WorkQueue.cpp
+++ b/Kernel/WorkQueue.cpp
@@ -27,7 +27,7 @@ UNMAP_AFTER_INIT WorkQueue::WorkQueue(const char* name)
         WorkItem* item;
         bool have_more;
         {
-            ScopedSpinlock lock(m_lock);
+            SpinlockLocker lock(m_lock);
             item = m_items.take_first();
             have_more = !m_items.is_empty();
         }
@@ -48,7 +48,7 @@ UNMAP_AFTER_INIT WorkQueue::WorkQueue(const char* name)
 void WorkQueue::do_queue(WorkItem* item)
 {
     {
-        ScopedSpinlock lock(m_lock);
+        SpinlockLocker lock(m_lock);
         m_items.append(*item);
     }
     m_wait_queue.wake_one();
diff --git a/Kernel/kprintf.cpp b/Kernel/kprintf.cpp
index 74408be202..9a85feeab9 100644
--- a/Kernel/kprintf.cpp
+++ b/Kernel/kprintf.cpp
@@ -153,7 +153,7 @@ static inline void internal_dbgputch(char ch)
 extern "C" void dbgputch(char ch)
 {
-    ScopedSpinlock lock(s_log_lock);
+    SpinlockLocker lock(s_log_lock);
     internal_dbgputch(ch);
 }
@@ -161,7 +161,7 @@ extern "C" void dbgputstr(const char* characters, size_t length)
 {
     if (!characters)
         return;
-    ScopedSpinlock lock(s_log_lock);
+    SpinlockLocker lock(s_log_lock);
     for (size_t i = 0; i < length; ++i)
         internal_dbgputch(characters[i]);
 }
@@ -175,7 +175,7 @@ extern "C" void kernelputstr(const char* characters, size_t length)
 {
     if (!characters)
         return;
-    ScopedSpinlock lock(s_log_lock);
+    SpinlockLocker lock(s_log_lock);
     for (size_t i = 0; i < length; ++i)
         console_out(characters[i]);
 }
@@ -184,7 +184,7 @@ extern "C" void kernelcriticalputstr(const char* characters, size_t length)
 {
     if (!characters)
         return;
-    ScopedSpinlock lock(s_log_lock);
+    SpinlockLocker lock(s_log_lock);
     for (size_t i = 0; i < length; ++i)
         critical_console_out(characters[i]);
 }
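One property of the converted call sites is worth spelling out: wherever two guards are nested in this diff, every path takes them in the same order (the page directory lock before s_mm_lock in Region::map/unmap and set_page_writable_direct, g_scheduler_lock before a thread's m_block_lock in Thread.cpp, the AHCI port's MutexLocker before its m_hard_lock), and because the guards are stack objects they are released in the reverse order automatically. A hypothetical sketch of that discipline, again using the illustrative types from the first aside rather than real kernel classes:

void update_doubly_guarded_state(TinySpinlock& outer, TinySpinlock& inner)
{
    LockerSketch outer_guard(outer); // every caller takes this lock first
    LockerSketch inner_guard(inner); // and this one second
    // ... mutate the state protected by both locks ...
}   // inner released first, then outer, as the guards unwind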