serenity/Libraries/LibIPC/IServerConnection.h
Andreas Kling f93c0dc489 LibIPC: Get client/server PIDs using getsockopt(SO_PEERCRED)
Instead of passing the PIDs back and forth in a handshake "Greet"
message, just use getsockopt(SO_PEERCRED) on both sides to get the same
information from the kernel.

This is a nice little simplification of the IPC protocol, although it
does not get rid of the handshake since we still have to pass the
"client ID" from the server to each client so they know how to refer
to themselves. This might not be necessary and we might be able to get
rid of this later on.
2019-12-06 18:39:59 +01:00
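
For reference, the mechanism this commit switches to is the SO_PEERCRED socket option on a connected local socket. A minimal standalone sketch of the pattern (illustrative only, not code from this file; the helper name is made up, and on Linux/glibc the ucred struct additionally requires _GNU_SOURCE):

#include <sys/socket.h>
#include <sys/types.h>

// Hypothetical helper for illustration: ask the kernel who is on the other end of a
// connected AF_LOCAL socket. SO_PEERCRED fills in a ucred { pid, uid, gid } describing
// the peer process, so neither side has to send its own PID in an IPC message.
static pid_t peer_pid_of(int socket_fd)
{
    ucred creds {};
    socklen_t creds_size = sizeof(creds);
    if (getsockopt(socket_fd, SOL_SOCKET, SO_PEERCRED, &creds, &creds_size) < 0)
        return -1;
    return creds.pid;
}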


#pragma once

#include <AK/NonnullOwnPtrVector.h>
#include <LibCore/CEvent.h>
#include <LibCore/CEventLoop.h>
#include <LibCore/CLocalSocket.h>
#include <LibCore/CNotifier.h>
#include <LibCore/CSyscallUtils.h>
#include <LibIPC/IMessage.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

template<typename LocalEndpoint, typename PeerEndpoint>
class IServerConnection : public CObject {
public:
    IServerConnection(LocalEndpoint& local_endpoint, const StringView& address)
        : m_local_endpoint(local_endpoint)
        , m_connection(CLocalSocket::construct(this))
        , m_notifier(CNotifier::construct(m_connection->fd(), CNotifier::Read, this))
    {
        // We want to rate-limit our clients
        m_connection->set_blocking(true);
        m_notifier->on_ready_to_read = [this] {
            drain_messages_from_server();
            handle_messages();
        };

        int retries = 100000;
        while (retries) {
            if (m_connection->connect(CSocketAddress::local(address))) {
                break;
            }
            dbgprintf("Client::Connection: connect failed: %d, %s\n", errno, strerror(errno));
            usleep(10000);
            --retries;
        }

        ucred creds;
        socklen_t creds_size = sizeof(creds);
        if (getsockopt(m_connection->fd(), SOL_SOCKET, SO_PEERCRED, &creds, &creds_size) < 0) {
            ASSERT_NOT_REACHED();
        }
        m_server_pid = creds.pid;

        ASSERT(m_connection->is_connected());
    }

    virtual void handshake() = 0;

    pid_t server_pid() const { return m_server_pid; }
    void set_my_client_id(int id) { m_my_client_id = id; }
    int my_client_id() const { return m_my_client_id; }

    template<typename MessageType>
    OwnPtr<MessageType> wait_for_specific_message()
    {
        // Double check we don't already have the event waiting for us.
        // Otherwise we might end up blocked for a while for no reason.
        for (ssize_t i = 0; i < m_unprocessed_messages.size(); ++i) {
            if (m_unprocessed_messages[i]->message_id() == MessageType::static_message_id()) {
                auto message = move(m_unprocessed_messages[i]);
                m_unprocessed_messages.remove(i);
                return message;
            }
        }

        for (;;) {
            fd_set rfds;
            FD_ZERO(&rfds);
            FD_SET(m_connection->fd(), &rfds);
            int rc = CSyscallUtils::safe_syscall(select, m_connection->fd() + 1, &rfds, nullptr, nullptr, nullptr);
            if (rc < 0) {
                perror("select");
            }
            ASSERT(rc > 0);
            ASSERT(FD_ISSET(m_connection->fd(), &rfds));
            if (!drain_messages_from_server())
                return nullptr;
            for (ssize_t i = 0; i < m_unprocessed_messages.size(); ++i) {
                if (m_unprocessed_messages[i]->message_id() == MessageType::static_message_id()) {
                    auto message = move(m_unprocessed_messages[i]);
                    m_unprocessed_messages.remove(i);
                    return message;
                }
            }
        }
    }

    bool post_message(const IMessage& message)
    {
        auto buffer = message.encode();
        int nwritten = write(m_connection->fd(), buffer.data(), (size_t)buffer.size());
        if (nwritten < 0) {
            perror("write");
            ASSERT_NOT_REACHED();
            return false;
        }
        ASSERT(nwritten == buffer.size());
        return true;
    }

    template<typename RequestType, typename... Args>
    OwnPtr<typename RequestType::ResponseType> send_sync(Args&&... args)
    {
        bool success = post_message(RequestType(forward<Args>(args)...));
        ASSERT(success);
        auto response = wait_for_specific_message<typename RequestType::ResponseType>();
        ASSERT(response);
        return response;
    }

private:
    bool drain_messages_from_server()
    {
        Vector<u8> bytes;
        for (;;) {
            u8 buffer[4096];
            ssize_t nread = recv(m_connection->fd(), buffer, sizeof(buffer), MSG_DONTWAIT);
            if (nread < 0) {
                if (errno == EAGAIN)
                    break;
                perror("read");
                exit(1);
                return false;
            }
            if (nread == 0) {
                dbg() << "EOF on IPC fd";
                // FIXME: Dying is definitely not always appropriate!
                exit(1);
                return false;
            }
            bytes.append(buffer, nread);
        }

        size_t decoded_bytes = 0;
        for (size_t index = 0; index < (size_t)bytes.size(); index += decoded_bytes) {
            auto remaining_bytes = ByteBuffer::wrap(bytes.data() + index, bytes.size() - index);
            if (auto message = LocalEndpoint::decode_message(remaining_bytes, decoded_bytes)) {
                m_unprocessed_messages.append(move(message));
            } else if (auto message = PeerEndpoint::decode_message(remaining_bytes, decoded_bytes)) {
                m_unprocessed_messages.append(move(message));
            } else {
                ASSERT_NOT_REACHED();
            }
            ASSERT(decoded_bytes);
        }
        return true;
    }

    void handle_messages()
    {
        auto messages = move(m_unprocessed_messages);
        for (auto& message : messages) {
            if (message->endpoint_magic() == LocalEndpoint::static_magic())
                m_local_endpoint.handle(*message);
        }
    }

    LocalEndpoint& m_local_endpoint;
    RefPtr<CLocalSocket> m_connection;
    RefPtr<CNotifier> m_notifier;
    Vector<OwnPtr<IMessage>> m_unprocessed_messages;
    int m_server_pid { -1 };
    int m_my_client_id { -1 };
};
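
For context on how this class is meant to be used: a client connection is typically a subclass that also implements the generated client-side endpoint and performs its handshake via send_sync(). A rough, hypothetical sketch (the endpoint and message names below are placeholders, not the actual IPC-compiler-generated API):

// Hypothetical usage sketch; ExampleClientEndpoint/ExampleServerEndpoint and the
// Greet message are placeholders standing in for generated endpoint types.
class ExampleServerConnection
    : public IServerConnection<ExampleClientEndpoint, ExampleServerEndpoint>
    , public ExampleClientEndpoint {
public:
    ExampleServerConnection()
        : IServerConnection(*this, "/tmp/portal/example")
    {
    }

    virtual void handshake() override
    {
        // The server still hands us our client ID during the handshake,
        // as noted in the commit message above.
        auto response = send_sync<ExampleServer::Greet>();
        set_my_client_id(response->client_id());
    }
};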