
Kernel: Make self-contained locking smart pointers their own classes

Until now, our kernel has reimplemented a number of AK classes to
provide automatic internal locking:

- RefPtr
- NonnullRefPtr
- WeakPtr
- Weakable
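
To make "automatic internal locking" concrete, here is a minimal sketch
in plain C++ of the idea: a pointer type that serializes every access
through a lock it owns, so callers never take a lock themselves. This is
purely illustrative; the kernel's real classes are built on AK
primitives, not the STL, and do far more than this.

#include <memory>
#include <mutex>

// Hypothetical illustration of a self-locking pointer; every read and
// assignment goes through an internal mutex that callers never see.
template<typename T>
class SelfLockingPtr {
public:
    void assign(std::shared_ptr<T> ptr)
    {
        std::lock_guard guard(m_lock); // writer locks internally
        m_ptr = std::move(ptr);
    }

    std::shared_ptr<T> strong_ref() const
    {
        std::lock_guard guard(m_lock); // reader locks internally
        return m_ptr;
    }

private:
    mutable std::mutex m_lock; // the "internal" lock
    std::shared_ptr<T> m_ptr;
};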

This patch renames the Kernel classes so that they can coexist with
the original AK classes:

- RefPtr => LockRefPtr
- NonnullRefPtr => NonnullLockRefPtr
- WeakPtr => LockWeakPtr
- Weakable => LockWeakable

The goal here is to eventually get rid of the Lock* classes in favor of
using external locking.
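
For contrast, a minimal sketch of the external-locking style this is
moving toward, modeled loosely on the with_exclusive() pattern already
visible in the diff below (IPv4Socket::all_sockets().with_exclusive(...)).
LockProtected here is an illustrative stand-in, not the kernel's actual
API: the guarded value is a plain, non-locking type, and every access is
an explicit, caller-visible critical section.

#include <mutex>

// Illustrative stand-in for an externally-locked container: the lock
// lives outside the value, and all access is funneled through it.
template<typename T>
class LockProtected {
public:
    template<typename Callback>
    decltype(auto) with_exclusive(Callback callback)
    {
        std::lock_guard guard(m_lock); // explicit, scoped critical section
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

// Usage sketch: a plain std::shared_ptr guarded externally, rather than
// a pointer class that locks on its own:
//
//     LockProtected<std::shared_ptr<int>> s_value;
//     s_value.with_exclusive([](auto& ptr) { ptr = std::make_shared<int>(42); });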
Author: Andreas Kling
Date:   2022-08-19 20:53:40 +02:00
Parent: e475263113
Commit: 11eee67b85

360 changed files with 1703 additions and 1672 deletions

Kernel/Net/NetworkTask.cpp

@@ -30,19 +30,19 @@ static void handle_ipv4(EthernetFrameHeader const&, size_t frame_size, Time cons
 static void handle_icmp(EthernetFrameHeader const&, IPv4Packet const&, Time const& packet_timestamp);
 static void handle_udp(IPv4Packet const&, Time const& packet_timestamp);
 static void handle_tcp(IPv4Packet const&, Time const& packet_timestamp);
-static void send_delayed_tcp_ack(RefPtr<TCPSocket> socket);
-static void send_tcp_rst(IPv4Packet const& ipv4_packet, TCPPacket const& tcp_packet, RefPtr<NetworkAdapter> adapter);
+static void send_delayed_tcp_ack(LockRefPtr<TCPSocket> socket);
+static void send_tcp_rst(IPv4Packet const& ipv4_packet, TCPPacket const& tcp_packet, LockRefPtr<NetworkAdapter> adapter);
 static void flush_delayed_tcp_acks();
 static void retransmit_tcp_packets();
 
 static Thread* network_task = nullptr;
-static HashTable<RefPtr<TCPSocket>>* delayed_ack_sockets;
+static HashTable<LockRefPtr<TCPSocket>>* delayed_ack_sockets;
 
 [[noreturn]] static void NetworkTask_main(void*);
 
 void NetworkTask::spawn()
 {
-    RefPtr<Thread> thread;
+    LockRefPtr<Thread> thread;
     auto name = KString::try_create("Network Task"sv);
     if (name.is_error())
         TODO();
@@ -57,7 +57,7 @@ bool NetworkTask::is_current()
 
 void NetworkTask_main(void*)
 {
-    delayed_ack_sockets = new HashTable<RefPtr<TCPSocket>>;
+    delayed_ack_sockets = new HashTable<LockRefPtr<TCPSocket>>;
     WaitQueue packet_wait_queue;
     int pending_packets = 0;
 
@@ -229,7 +229,7 @@ void handle_icmp(EthernetFrameHeader const& eth, IPv4Packet const& ipv4_packet,
     dbgln_if(ICMP_DEBUG, "handle_icmp: source={}, destination={}, type={:#02x}, code={:#02x}", ipv4_packet.source().to_string(), ipv4_packet.destination().to_string(), icmp_header.type(), icmp_header.code());
 
     {
-        NonnullRefPtrVector<IPv4Socket> icmp_sockets;
+        NonnullLockRefPtrVector<IPv4Socket> icmp_sockets;
         IPv4Socket::all_sockets().with_exclusive([&](auto& sockets) {
             for (auto& socket : sockets) {
                 if (socket.protocol() == (unsigned)IPv4Protocol::ICMP)
@@ -302,7 +302,7 @@ void handle_udp(IPv4Packet const& ipv4_packet, Time const& packet_timestamp)
     socket->did_receive(ipv4_packet.source(), udp_packet.source_port(), { &ipv4_packet, sizeof(IPv4Packet) + ipv4_packet.payload_size() }, packet_timestamp);
 }
 
-void send_delayed_tcp_ack(RefPtr<TCPSocket> socket)
+void send_delayed_tcp_ack(LockRefPtr<TCPSocket> socket)
 {
     VERIFY(socket->mutex().is_locked());
     if (!socket->should_delay_next_ack()) {
@@ -315,7 +315,7 @@ void send_delayed_tcp_ack(RefPtr<TCPSocket> socket)
 
 void flush_delayed_tcp_acks()
 {
-    Vector<RefPtr<TCPSocket>, 32> remaining_sockets;
+    Vector<LockRefPtr<TCPSocket>, 32> remaining_sockets;
     for (auto& socket : *delayed_ack_sockets) {
         MutexLocker locker(socket->mutex());
         if (socket->should_delay_next_ack()) {
@@ -334,7 +334,7 @@ void flush_delayed_tcp_acks()
     }
 }
 
-void send_tcp_rst(IPv4Packet const& ipv4_packet, TCPPacket const& tcp_packet, RefPtr<NetworkAdapter> adapter)
+void send_tcp_rst(IPv4Packet const& ipv4_packet, TCPPacket const& tcp_packet, LockRefPtr<NetworkAdapter> adapter)
 {
     auto routing_decision = route_to(ipv4_packet.source(), ipv4_packet.destination(), adapter);
     if (routing_decision.is_zero())
@@ -657,7 +657,7 @@ void retransmit_tcp_packets()
 {
     // We must keep the sockets alive until after we've unlocked the hash table
     // in case retransmit_packets() realizes that it wants to close the socket.
-    NonnullRefPtrVector<TCPSocket, 16> sockets;
+    NonnullLockRefPtrVector<TCPSocket, 16> sockets;
     TCPSocket::sockets_for_retransmit().for_each_shared([&](auto const& socket) {
         // We ignore allocation failures above the first 16 guaranteed socket slots, as
         // we will just retransmit their packets the next time around