1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-26 00:27:43 +00:00

LibIPC: Move IPC client/server connection templates to LibIPC

Move over the CoreIPC::Server and CoreIPC::Client namespace stuff
into LibIPC where it will soon become LibIPC-style things.
This commit is contained in:
Andreas Kling 2019-12-02 09:52:07 +01:00
parent 272d65e3e2
commit 5d4ee0f58a
9 changed files with 7 additions and 7 deletions

View file

@ -1,161 +0,0 @@
#pragma once
#include <LibCore/CEvent.h>
#include <LibCore/CEventLoop.h>
#include <LibCore/CLocalSocket.h>
#include <LibCore/CNotifier.h>
#include <LibCore/CSyscallUtils.h>
#include <LibIPC/IMessage.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
//#define CIPC_DEBUG
namespace IPC {
namespace Client {
// Client-side IPC connection ("NG" = next-generation, pre-LibIPC naming).
// LocalEndpoint decodes and handles messages the server pushes to us;
// PeerEndpoint decodes messages we wait on explicitly (e.g. synchronous
// responses), which are queued in m_unprocessed_messages until requested.
template<typename LocalEndpoint, typename PeerEndpoint>
class ConnectionNG : public CObject {
public:
// Connects a local (Unix domain) socket to `address`, retrying with a
// 10 ms sleep between attempts so the client tolerates the server not
// having started yet. Incoming data is drained whenever the socket fd
// becomes readable (via the CNotifier callback).
ConnectionNG(LocalEndpoint& local_endpoint, const StringView& address)
: m_local_endpoint(local_endpoint)
, m_connection(CLocalSocket::construct(this))
, m_notifier(CNotifier::construct(m_connection->fd(), CNotifier::Read, this))
{
// We want to rate-limit our clients
m_connection->set_blocking(true);
m_notifier->on_ready_to_read = [this] {
drain_messages_from_server();
};
// Retry for a long time (100000 attempts * 10 ms) before giving up.
int retries = 100000;
while (retries) {
if (m_connection->connect(CSocketAddress::local(address))) {
break;
}
dbgprintf("Client::Connection: connect failed: %d, %s\n", errno, strerror(errno));
usleep(10000);
--retries;
}
// Dies if we never managed to connect.
ASSERT(m_connection->is_connected());
}
// Subclasses perform their protocol-specific greeting here (typically
// exchanging pids / client ids with the server).
virtual void handshake() = 0;
void set_server_pid(pid_t pid) { m_server_pid = pid; }
pid_t server_pid() const { return m_server_pid; }
void set_my_client_id(int id) { m_my_client_id = id; }
int my_client_id() const { return m_my_client_id; }
// Blocks until a message with MessageType's id arrives, and returns it.
// Returns nullptr if the connection is drained unsuccessfully.
// Other peer messages received while waiting stay queued for later callers.
template<typename MessageType>
OwnPtr<MessageType> wait_for_specific_message()
{
// Double check we don't already have the event waiting for us.
// Otherwise we might end up blocked for a while for no reason.
for (ssize_t i = 0; i < m_unprocessed_messages.size(); ++i) {
if (m_unprocessed_messages[i]->id() == MessageType::static_message_id()) {
auto message = move(m_unprocessed_messages[i]);
m_unprocessed_messages.remove(i);
return message;
}
}
// Block in select() until the fd is readable, then drain and re-scan
// the queue; repeat until the wanted message shows up.
for (;;) {
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(m_connection->fd(), &rfds);
// safe_syscall retries on EINTR; no timeout is passed, so this
// blocks indefinitely until the fd is readable.
int rc = CSyscallUtils::safe_syscall(select, m_connection->fd() + 1, &rfds, nullptr, nullptr, nullptr);
if (rc < 0) {
perror("select");
}
ASSERT(rc > 0);
ASSERT(FD_ISSET(m_connection->fd(), &rfds));
if (!drain_messages_from_server())
return nullptr;
for (ssize_t i = 0; i < m_unprocessed_messages.size(); ++i) {
if (m_unprocessed_messages[i]->id() == MessageType::static_message_id()) {
auto message = move(m_unprocessed_messages[i]);
m_unprocessed_messages.remove(i);
return message;
}
}
}
}
// Encodes `message` and writes it to the (blocking) socket in one shot.
// Any write failure is fatal to the process.
bool post_message(const IMessage& message)
{
auto buffer = message.encode();
int nwritten = write(m_connection->fd(), buffer.data(), (size_t)buffer.size());
if (nwritten < 0) {
perror("write");
ASSERT_NOT_REACHED();
// NOTE(review): unreachable after ASSERT_NOT_REACHED().
return false;
}
// A short write would be a protocol error; assert the full buffer went out.
ASSERT(nwritten == buffer.size());
return true;
}
// Synchronous request/response round-trip: posts RequestType(args...) and
// blocks until its ResponseType arrives. Both steps assert on failure.
template<typename RequestType, typename... Args>
OwnPtr<typename RequestType::ResponseType> send_sync(Args&&... args)
{
bool success = post_message(RequestType(forward<Args>(args)...));
ASSERT(success);
auto response = wait_for_specific_message<typename RequestType::ResponseType>();
ASSERT(response);
return response;
}
private:
// Reads everything currently available on the socket (non-blocking via
// MSG_DONTWAIT), then decodes the byte stream message-by-message:
// messages our LocalEndpoint understands are handled immediately, messages
// from the PeerEndpoint are queued for wait_for_specific_message().
bool drain_messages_from_server()
{
Vector<u8> bytes;
for (;;) {
u8 buffer[4096];
ssize_t nread = recv(m_connection->fd(), buffer, sizeof(buffer), MSG_DONTWAIT);
if (nread < 0) {
// EAGAIN: no more data available right now; stop reading.
if (errno == EAGAIN)
break;
perror("read");
exit(1);
// NOTE(review): unreachable after exit(1).
return false;
}
if (nread == 0) {
dbg() << "EOF on IPC fd";
// FIXME: Dying is definitely not always appropriate!
exit(1);
// NOTE(review): unreachable after exit(1).
return false;
}
bytes.append(buffer, nread);
}
// Decode the accumulated bytes; decode_message() reports how many bytes
// each message consumed so we can advance through the stream.
size_t decoded_bytes = 0;
for (size_t index = 0; index < (size_t)bytes.size(); index += decoded_bytes) {
auto remaining_bytes = ByteBuffer::wrap(bytes.data() + index, bytes.size() - index);
if (auto message = LocalEndpoint::decode_message(remaining_bytes, decoded_bytes)) {
m_local_endpoint.handle(*message);
} else if (auto message = PeerEndpoint::decode_message(remaining_bytes, decoded_bytes)) {
m_unprocessed_messages.append(move(message));
} else {
// Neither endpoint recognized the message; the stream is corrupt.
ASSERT_NOT_REACHED();
}
// A decoded message must have consumed bytes, or we'd loop forever.
ASSERT(decoded_bytes);
}
return true;
}
LocalEndpoint& m_local_endpoint;
RefPtr<CLocalSocket> m_connection;
RefPtr<CNotifier> m_notifier;
// Peer messages received but not yet claimed by wait_for_specific_message().
Vector<OwnPtr<IMessage>> m_unprocessed_messages;
int m_server_pid { -1 };
int m_my_client_id { -1 };
};
} // Client
} // IPC

View file

@ -1,172 +0,0 @@
#pragma once
#include <AK/Queue.h>
#include <LibCore/CEvent.h>
#include <LibCore/CEventLoop.h>
#include <LibCore/CIODevice.h>
#include <LibCore/CLocalSocket.h>
#include <LibCore/CNotifier.h>
#include <LibCore/CObject.h>
#include <LibIPC/IEndpoint.h>
#include <LibIPC/IMessage.h>
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
//#define CIPC_DEBUG
namespace IPC {
namespace Server {
// Base class for server-side IPC events, layered on the CEvent system.
// Type ids start at 2000 to stay clear of the core CEvent type range.
class Event : public CEvent {
public:
enum Type {
Invalid = 2000,
Disconnected,
};
// Default-constructed events carry no type (base CEvent default).
Event() {}
// Constructs an event of the given IPC type.
explicit Event(Type type)
: CEvent(type) {}
};
// Posted to a connection's event queue when its client hangs up,
// carrying the id of the client that disconnected.
class DisconnectedEvent : public Event {
public:
explicit DisconnectedEvent(int client_id)
: Event(Disconnected)
, m_client_id(client_id) {}
// The id of the client that went away.
int client_id() const { return m_client_id; }
private:
int m_client_id { 0 };
};
// Instantiates a server-side connection object of type T for a freshly
// accepted client, forwarding all arguments to T::construct().
template<typename T, typename... Args>
NonnullRefPtr<T> new_connection_ng_for_client(Args&&... args)
{
    return T::construct(forward<Args>(args)...);
}
// Server-side IPC connection: wraps one accepted client socket and routes
// decoded messages to the Endpoint, which produces optional responses.
template<typename Endpoint>
class ConnectionNG : public CObject {
public:
// Takes shared ownership of `socket` (also parented as a CObject child so
// it dies with the connection) and drains it whenever data arrives.
ConnectionNG(Endpoint& endpoint, CLocalSocket& socket, int client_id)
: m_endpoint(endpoint)
, m_socket(socket)
, m_client_id(client_id)
{
add_child(socket);
m_socket->on_ready_to_read = [this] { drain_messages_from_client(); };
}
virtual ~ConnectionNG() override
{
}
// Encodes `message` and writes it to the client socket in one shot.
// EPIPE means the client is gone; EAGAIN means its buffer is full
// (treated as misbehavior); anything else is fatal.
void post_message(const IMessage& message)
{
auto buffer = message.encode();
int nwritten = write(m_socket->fd(), buffer.data(), (size_t)buffer.size());
if (nwritten < 0) {
switch (errno) {
case EPIPE:
dbg() << "Connection::post_message: Disconnected from peer";
shutdown();
return;
case EAGAIN:
dbg() << "Connection::post_message: Client buffer overflowed.";
did_misbehave();
return;
default:
perror("Connection::post_message write");
ASSERT_NOT_REACHED();
}
}
// A short write would be a protocol error; assert the full buffer went out.
ASSERT(nwritten == buffer.size());
}
// Reads everything currently available (non-blocking via MSG_DONTWAIT),
// then decodes and dispatches each message to the Endpoint, posting any
// response back to the client.
void drain_messages_from_client()
{
Vector<u8> bytes;
for (;;) {
u8 buffer[4096];
ssize_t nread = recv(m_socket->fd(), buffer, sizeof(buffer), MSG_DONTWAIT);
if (nread == 0 || (nread == -1 && errno == EAGAIN)) {
// EOF with nothing buffered at all means the client hung up:
// post a DisconnectedEvent so die() runs from the event loop.
if (bytes.is_empty()) {
CEventLoop::current().post_event(*this, make<DisconnectedEvent>(client_id()));
return;
}
break;
}
if (nread < 0) {
perror("recv");
ASSERT_NOT_REACHED();
}
bytes.append(buffer, nread);
}
// Decode the accumulated bytes; decode_message() reports how many bytes
// each message consumed so we can advance through the stream.
size_t decoded_bytes = 0;
for (size_t index = 0; index < (size_t)bytes.size(); index += decoded_bytes) {
auto remaining_bytes = ByteBuffer::wrap(bytes.data() + index, bytes.size() - index);
auto message = Endpoint::decode_message(remaining_bytes, decoded_bytes);
if (!message) {
// Unrecognized bytes from the client: drop the connection.
dbg() << "drain_messages_from_client: Endpoint didn't recognize message";
did_misbehave();
return;
}
if (auto response = m_endpoint.handle(*message))
post_message(*response);
// A decoded message must have consumed bytes, or we'd loop forever.
ASSERT(decoded_bytes);
}
}
// Logs the offending client and tears the connection down.
void did_misbehave()
{
dbg() << "Connection{" << this << "} (id=" << m_client_id << ", pid=" << m_client_pid << ") misbehaved, disconnecting.";
shutdown();
}
// Closes the socket and asks the subclass to destroy this connection.
void shutdown()
{
m_socket->close();
die();
}
int client_id() const { return m_client_id; }
pid_t client_pid() const { return m_client_pid; }
void set_client_pid(pid_t pid) { m_client_pid = pid; }
// Subclasses implement teardown (typically removing themselves from the
// server's client map, destroying this object).
virtual void die() = 0;
protected:
// Handles the deferred DisconnectedEvent posted by drain_messages_from_client().
void event(CEvent& event) override
{
if (event.type() == Event::Disconnected) {
int client_id = static_cast<const DisconnectedEvent&>(event).client_id();
dbgprintf("Connection: Client disconnected: %d\n", client_id);
die();
return;
}
CObject::event(event);
}
private:
Endpoint& m_endpoint;
RefPtr<CLocalSocket> m_socket;
int m_client_id { -1 };
int m_client_pid { -1 };
};
} // Server
} // IPC