mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 03:47:35 +00:00
Userland: Convert TLS::TLSv12 to a Core::Stream::Socket
This commit converts TLS::TLSv12 to a Core::Stream object, and in the process allows TLS to now wrap other Core::Stream::Socket objects. As a large part of LibHTTP and LibGemini depend on LibTLS's interface, this also converts those to support Core::Stream, which leads to a simplification of LibHTTP (as there's no need to care about the underlying socket type anymore). Note that RequestServer now controls the TLS socket options, which is a better place anyway, as RS is the first receiver of the user-requested options (though this is currently not particularly useful).
This commit is contained in:
parent
7a95c451a3
commit
aafc451016
47 changed files with 841 additions and 1157 deletions
|
@ -5,14 +5,14 @@
|
|||
*/
|
||||
|
||||
#include <AK/Debug.h>
|
||||
#include <LibCore/Stream.h>
|
||||
#include <LibGemini/GeminiResponse.h>
|
||||
#include <LibGemini/Job.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace Gemini {
|
||||
|
||||
Job::Job(const GeminiRequest& request, OutputStream& output_stream)
|
||||
Job::Job(const GeminiRequest& request, Core::Stream::Stream& output_stream)
|
||||
: Core::NetworkJob(output_stream)
|
||||
, m_request(request)
|
||||
{
|
||||
|
@ -22,12 +22,83 @@ Job::~Job()
|
|||
{
|
||||
}
|
||||
|
||||
// Bind this job to an already-connected socket and begin the request.
// The socket must actually be a BufferedSocketBase; verify_cast enforces that.
void Job::start(Core::Stream::Socket& socket)
{
    // A Job is single-shot: it may never be started twice.
    VERIFY(!m_socket);
    auto* buffered_socket = verify_cast<Core::Stream::BufferedSocketBase>(&socket);
    m_socket = buffered_socket;
    on_socket_connected();
}
|
||||
|
||||
// Tear down the job's connection. CloseSocket closes the underlying socket;
// any other mode merely detaches the socket (drops our read callback and
// forgets the pointer) without closing it.
void Job::shutdown(ShutdownMode mode)
{
    if (!m_socket)
        return;

    if (mode != ShutdownMode::CloseSocket) {
        // Detach: stop receiving notifications, leave the socket open for others.
        m_socket->on_ready_to_read = nullptr;
        m_socket = nullptr;
        return;
    }

    m_socket->close();
}
|
||||
|
||||
// Install a read-readiness handler on the socket. The handler fires the
// callback once per notification, then keeps re-invoking it while buffered
// data remains, so a single notification drains everything available.
void Job::register_on_ready_to_read(Function<void()> callback)
{
    m_socket->on_ready_to_read = [this, callback = move(callback)] {
        // Always run at least once, then loop for as long as more data can be read.
        do {
            callback();
        } while (can_read());
    };
}
|
||||
|
||||
bool Job::can_read_line() const
|
||||
{
|
||||
return MUST(m_socket->can_read_line());
|
||||
}
|
||||
|
||||
// Read one CRLF-terminated line (at most `size` bytes) from the socket and
// return it as a String (without the terminator, per read_until's contract).
String Job::read_line(size_t size)
{
    auto line_buffer = ByteBuffer::create_uninitialized(size).release_value_but_fixme_should_propagate_errors();
    auto bytes_read = MUST(m_socket->read_until(line_buffer, "\r\n"sv));
    // Only the first `bytes_read` bytes of the buffer are valid data.
    return String::copy(line_buffer.span().slice(0, bytes_read));
}
|
||||
|
||||
// Pull up to `size` bytes off the socket and return exactly what was read.
ByteBuffer Job::receive(size_t size)
{
    auto read_buffer = ByteBuffer::create_uninitialized(size).release_value_but_fixme_should_propagate_errors();
    auto bytes_read = MUST(m_socket->read(read_buffer));
    // Trim the uninitialized tail so callers only ever see real data.
    return read_buffer.slice(0, bytes_read);
}
|
||||
|
||||
bool Job::can_read() const
|
||||
{
|
||||
return MUST(m_socket->can_read_without_blocking());
|
||||
}
|
||||
|
||||
// Write the entire byte span to the socket; false indicates a (partial or
// total) transmission failure.
bool Job::write(ReadonlyBytes bytes)
{
    bool const wrote_everything = m_socket->write_or_error(bytes);
    return wrote_everything;
}
|
||||
|
||||
void Job::flush_received_buffers()
|
||||
{
|
||||
for (size_t i = 0; i < m_received_buffers.size(); ++i) {
|
||||
auto& payload = m_received_buffers[i];
|
||||
auto written = do_write(payload);
|
||||
m_received_size -= written;
|
||||
auto result = do_write(payload);
|
||||
if (result.is_error()) {
|
||||
if (!result.error().is_errno()) {
|
||||
dbgln("Job: Failed to flush received buffers: {}", result.error());
|
||||
continue;
|
||||
}
|
||||
if (result.error().code() == EINTR) {
|
||||
i--;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
auto written = result.release_value();
|
||||
m_buffered_size -= written;
|
||||
if (written == payload.size()) {
|
||||
// FIXME: Make this a take-first-friendly object?
|
||||
m_received_buffers.take_first();
|
||||
|
@ -41,20 +112,16 @@ void Job::flush_received_buffers()
|
|||
|
||||
void Job::on_socket_connected()
|
||||
{
|
||||
register_on_ready_to_write([this] {
|
||||
if (m_sent_data)
|
||||
return;
|
||||
m_sent_data = true;
|
||||
auto raw_request = m_request.to_raw_request();
|
||||
auto raw_request = m_request.to_raw_request();
|
||||
|
||||
if constexpr (JOB_DEBUG) {
|
||||
dbgln("Job: raw_request:");
|
||||
dbgln("{}", String::copy(raw_request));
|
||||
}
|
||||
bool success = write(raw_request);
|
||||
if (!success)
|
||||
deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
|
||||
|
||||
if constexpr (JOB_DEBUG) {
|
||||
dbgln("Job: raw_request:");
|
||||
dbgln("{}", String::copy(raw_request));
|
||||
}
|
||||
bool success = write(raw_request);
|
||||
if (!success)
|
||||
deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
|
||||
});
|
||||
register_on_ready_to_read([this] {
|
||||
if (is_cancelled())
|
||||
return;
|
||||
|
@ -65,19 +132,19 @@ void Job::on_socket_connected()
|
|||
|
||||
auto line = read_line(PAGE_SIZE);
|
||||
if (line.is_null()) {
|
||||
warnln("Job: Expected status line");
|
||||
dbgln("Job: Expected status line");
|
||||
return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
|
||||
}
|
||||
|
||||
auto parts = line.split_limit(' ', 2);
|
||||
if (parts.size() != 2) {
|
||||
warnln("Job: Expected 2-part status line, got '{}'", line);
|
||||
dbgln("Job: Expected 2-part status line, got '{}'", line);
|
||||
return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
|
||||
}
|
||||
|
||||
auto status = parts[0].to_uint();
|
||||
if (!status.has_value()) {
|
||||
warnln("Job: Expected numeric status code");
|
||||
dbgln("Job: Expected numeric status code");
|
||||
return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
|
||||
}
|
||||
|
||||
|
@ -97,41 +164,41 @@ void Job::on_socket_connected()
|
|||
} else if (m_status >= 60 && m_status < 70) {
|
||||
m_state = State::InBody;
|
||||
} else {
|
||||
warnln("Job: Expected status between 10 and 69; instead got {}", m_status);
|
||||
dbgln("Job: Expected status between 10 and 69; instead got {}", m_status);
|
||||
return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
|
||||
}
|
||||
|
||||
return;
|
||||
if (!can_read()) {
|
||||
dbgln("Can't read further :(");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
VERIFY(m_state == State::InBody || m_state == State::Finished);
|
||||
|
||||
read_while_data_available([&] {
|
||||
while (MUST(m_socket->can_read_without_blocking())) {
|
||||
auto read_size = 64 * KiB;
|
||||
|
||||
auto payload = receive(read_size);
|
||||
if (payload.is_empty()) {
|
||||
if (eof()) {
|
||||
if (m_socket->is_eof()) {
|
||||
finish_up();
|
||||
return IterationDecision::Break;
|
||||
}
|
||||
|
||||
if (should_fail_on_empty_payload()) {
|
||||
deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
|
||||
return IterationDecision::Break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
m_received_size += payload.size();
|
||||
m_buffered_size += payload.size();
|
||||
m_received_buffers.append(move(payload));
|
||||
flush_received_buffers();
|
||||
|
||||
deferred_invoke([this] { did_progress({}, m_received_size); });
|
||||
|
||||
return IterationDecision::Continue;
|
||||
});
|
||||
if (m_socket->is_eof())
|
||||
break;
|
||||
}
|
||||
|
||||
if (!is_established()) {
|
||||
if (!m_socket->is_open() || m_socket->is_eof()) {
|
||||
dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
|
||||
finish_up();
|
||||
}
|
||||
|
@ -142,7 +209,7 @@ void Job::finish_up()
|
|||
{
|
||||
m_state = State::Finished;
|
||||
flush_received_buffers();
|
||||
if (m_received_size != 0) {
|
||||
if (m_buffered_size != 0) {
|
||||
// We have to wait for the client to consume all the downloaded data
|
||||
// before we can actually call `did_finish`. in a normal flow, this should
|
||||
// never be hit since the client is reading as we are writing, unless there
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue